/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>

struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};

struct ib_umem_odp {
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page		**page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t		*dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters. See the usage
	 * sketch after this struct.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

	/* When false, use the notifier counter in the per_mm struct. */
	bool mn_counters_active;
	int notifiers_seq;
	int notifiers_count;

	/* A linked list of umems that don't have private mmu notifier
	 * counters yet. */
	struct list_head no_private_counters;

	/* Tree tracking */
	struct umem_odp_node	interval_tree;

	struct completion	notifier_completion;
	int			dying;
	struct work_struct	work;
};

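/*
 * A minimal usage sketch for the umem_mutex protocol described in the
 * struct above. The helper name "drv_peek_page" is hypothetical, not
 * part of this API: the point is that any inspection of page_list or
 * dma_list must hold umem_mutex, because the notifier path may unmap
 * entries concurrently; an unmapped index reads back as NULL.
 *
 *	static struct page *drv_peek_page(struct ib_umem_odp *odp, u64 idx)
 *	{
 *		struct page *page;
 *
 *		mutex_lock(&odp->umem_mutex);
 *		page = odp->page_list[idx];
 *		mutex_unlock(&odp->umem_mutex);
 *		return page;
 *	}
 */
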
static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_ucontext_per_mm {
	struct ib_ucontext *context;
	struct mm_struct *mm;

	struct rb_root_cached umem_tree;
	/* Protects umem_tree */
	struct rw_semaphore umem_rwsem;
	atomic_t notifier_count;

	struct mmu_notifier mn;
	/* A list of umems that don't have private mmu notifier counters yet. */
	struct list_head no_private_counters;
	unsigned int odp_mrs_count;

	struct list_head ucontext_list;
};

int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
				      unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))

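/*
 * Illustrative sketch of decoding a dma_list entry with the masks
 * above ("idx" is assumed to be a page index that is currently
 * mapped; an unmapped entry carries neither permission bit):
 *
 *	u64 entry = umem_odp->dma_list[idx];
 *	dma_addr_t dma = entry & ODP_DMA_ADDR_MASK;
 *	bool writable = entry & ODP_WRITE_ALLOWED_BIT;
 */
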
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

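/*
 * Hedged usage sketch for a driver page-fault path; the variable names
 * ("user_va", "length") and error handling are illustrative only. The
 * notifier sequence number must be sampled before mapping so that
 * ib_umem_mmu_notifier_retry() below can detect a racing invalidation.
 *
 *	unsigned long current_seq = READ_ONCE(umem_odp->notifiers_seq);
 *	int npages;
 *
 *	npages = ib_umem_odp_map_dma_pages(umem_odp, user_va, length,
 *					   ODP_READ_ALLOWED_BIT |
 *					   ODP_WRITE_ALLOWED_BIT,
 *					   current_seq);
 *	if (npages < 0)
 *		return npages;
 *
 * The inverse operation, typically run from the mmu notifier:
 *
 *	ib_umem_odp_unmap_dma_pages(umem_odp, user_va, user_va + length);
 */
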
typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);

/*
 * Call the callback on each ib_umem in the range. Returns the logical OR of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);

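/*
 * Sketch of a range callback; "invalidate_range_cb" is a hypothetical
 * driver function, not part of this API. The interval tree must be
 * protected (umem_rwsem held for read) while iterating.
 *
 *	static int invalidate_range_cb(struct ib_umem_odp *item,
 *				       u64 start, u64 end, void *cookie)
 *	{
 *		ib_umem_odp_unmap_dma_pages(item, start, end);
 *		return 0;
 *	}
 *
 *	down_read(&per_mm->umem_rwsem);
 *	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
 *				      invalidate_range_cb, true, NULL);
 *	up_read(&per_mm->umem_rwsem);
 */
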
/*
 * Find the first region intersecting with the address range.
 * Return NULL if not found.
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length);

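/*
 * Illustrative only: finding the ODP region backing a single address,
 * again with umem_rwsem held for read.
 *
 *	down_read(&per_mm->umem_rwsem);
 *	odp = rbt_ib_umem_lookup(&per_mm->umem_tree, addr, PAGE_SIZE);
 *	up_read(&per_mm->umem_rwsem);
 */
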
static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry(). It must be called with the relevant locks
	 * taken (umem_odp->umem_mutex and per_mm->umem_rwsem held for
	 * read).
	 */

	/* Do not allow page faults while the new ib_umem hasn't seen a state
	 * with zero notifiers yet, and doesn't have its own valid set of
	 * private counters. */
	if (!umem_odp->mn_counters_active)
		return 1;

	if (unlikely(umem_odp->notifiers_count))
		return 1;
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}

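/*
 * A sketch of the KVM-style retry loop this helper supports (names and
 * control flow are illustrative): the fault handler samples
 * notifiers_seq, maps the pages, then re-checks under umem_mutex before
 * handing the translation to the hardware.
 *
 *	unsigned long seq = READ_ONCE(umem_odp->notifiers_seq);
 *
 *	ret = ib_umem_odp_map_dma_pages(umem_odp, va, len, access, seq);
 *	...
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (ib_umem_mmu_notifier_retry(umem_odp, seq)) {
 *		mutex_unlock(&umem_odp->umem_mutex);
 *		goto retry;	(restart from the map step)
 *	}
 *	(now safe to install the mapping in the HW)
 *	mutex_unlock(&umem_odp->umem_mutex);
 */
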
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	return -EINVAL;
}

static inline struct ib_umem_odp *
ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, unsigned long addr,
		  size_t size)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */