// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/gfp.h>
#include <rdma/ib_verbs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/resource.h>

#include "siw.h"
#include "siw_mem.h"

/*
 * Stag lookup is based on its index part only (24 bits).
 * The code avoids special Stag of zero and tries to randomize
 * STag values between 1 and SIW_STAG_MAX_INDEX.
 */
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m)
{
	struct xa_limit limit = XA_LIMIT(1, 0x00ffffff);
	u32 id, next;

	get_random_bytes(&next, 4);
	next &= 0x00ffffff;

	if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next,
			    GFP_KERNEL) < 0)
		return -ENOMEM;

	/* Set the STag index part */
	m->stag = id << 8;

	siw_dbg_mem(m, "new MEM object\n");

	return 0;
}

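/*
 * The 24-bit xarray id forms the STag index in the upper bits; the low
 * 8 bits are left for the consumer key. Lookups therefore strip the key
 * with 'stag >> 8', while registration stores 'id << 8'.
 */
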
/*
 * siw_mem_id2obj()
 *
 * resolves memory from stag given by id. might be called from:
 * o process context before sending out of sgl, or
 * o in softirq when resolving target memory
 */
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
{
	struct siw_mem *mem;

	rcu_read_lock();
	mem = xa_load(&sdev->mem_xa, stag_index);
	if (likely(mem && kref_get_unless_zero(&mem->ref))) {
		rcu_read_unlock();
		return mem;
	}
	rcu_read_unlock();

	return NULL;
}

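/*
 * xa_load() is RCU safe, so the lookup runs without taking the xarray lock;
 * kref_get_unless_zero() then refuses to revive an object whose last
 * reference is already gone, closing the race with concurrent teardown.
 */
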
static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
			   bool dirty)
{
	struct page **p = chunk->plist;

	while (num_pages--) {
		if (!PageDirty(*p) && dirty)
			put_user_pages_dirty_lock(p, 1);
		else
			put_user_page(*p);
		p++;
	}
}

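/*
 * Pages pinned for a writable mapping are marked dirty before the pin is
 * dropped, so data placed by the peer gets written back to the backing
 * file or swap; pages already dirty only need the plain put.
 */
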
void siw_umem_release(struct siw_umem *umem, bool dirty)
{
	struct mm_struct *mm_s = umem->owning_mm;
	int i, num_pages = umem->num_pages;

	for (i = 0; num_pages; i++) {
		int to_free = min_t(int, PAGES_PER_CHUNK, num_pages);

		siw_free_plist(&umem->page_chunk[i], to_free,
			       umem->writable && dirty);
		kfree(umem->page_chunk[i].plist);
		num_pages -= to_free;
	}
	atomic64_sub(umem->num_pages, &mm_s->pinned_vm);

	mmdrop(mm_s);
	kfree(umem->page_chunk);
	kfree(umem);
}

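/*
 * The owning mm's pinned_vm counter is decremented by exactly the number
 * of pages charged in siw_umem_get(), and the mm reference taken at pin
 * time is dropped, so releasing a umem fully undoes the pinning.
 */
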
int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
		   u64 start, u64 len, int rights)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mem *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	struct xa_limit limit = XA_LIMIT(1, 0x00ffffff);
	u32 id, next;

	if (!mem)
		return -ENOMEM;

	mem->mem_obj = mem_obj;
	mem->sdev = sdev;
	mem->va = start;
	mem->len = len;
	mem->pd = pd;
	mem->perms = rights & IWARP_ACCESS_MASK;
	kref_init(&mem->ref);

	mr->mem = mem;

	get_random_bytes(&next, 4);
	next &= 0x00ffffff;

	if (xa_alloc_cyclic(&sdev->mem_xa, &id, mem, limit, &next,
			    GFP_KERNEL) < 0) {
		kfree(mem);
		return -ENOMEM;
	}
	/* Set the STag index part */
	mem->stag = id << 8;
	mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;

	return 0;
}

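/*
 * siw is a software-only device, so local and remote access share one
 * code path and the freshly registered MR exposes the same STag value as
 * both lkey and rkey.
 */
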
void siw_mr_drop_mem(struct siw_mr *mr)
{
	struct siw_mem *mem = mr->mem, *found;

	mem->stag_valid = 0;

	/* make STag invalid visible asap */
	smp_mb();

	found = xa_erase(&mem->sdev->mem_xa, mem->stag >> 8);
	WARN_ON(found != mem);
	siw_mem_put(mem);
}

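/*
 * Erasing the xarray entry stops new lookups from finding the STag;
 * references already taken via siw_mem_id2obj() keep the object alive
 * until siw_mem_put() drops the last one and siw_free_mem() runs.
 */
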
void siw_free_mem(struct kref *ref)
{
	struct siw_mem *mem = container_of(ref, struct siw_mem, ref);

	siw_dbg_mem(mem, "free mem, pbl: %s\n", mem->is_pbl ? "y" : "n");

	if (!mem->is_mw && mem->mem_obj) {
		if (mem->is_pbl == 0)
			siw_umem_release(mem->umem, true);
		else
			kfree(mem->pbl);
	}
	kfree(mem);
}

/*
 * siw_check_mem()
 *
 * Check protection domain, STAG state, access permissions and
 * address range for memory object.
 *
 * @pd:		Protection Domain memory should belong to
 * @mem:	memory to be checked
 * @addr:	starting addr of mem
 * @perms:	requested access permissions
 * @len:	len of memory interval to be checked
 */
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
		  enum ib_access_flags perms, int len)
{
	if (!mem->stag_valid) {
		siw_dbg_pd(pd, "STag 0x%08x invalid\n", mem->stag);
		return -E_STAG_INVALID;
	}
	if (mem->pd != pd) {
		siw_dbg_pd(pd, "STag 0x%08x: PD mismatch\n", mem->stag);
		return -E_PD_MISMATCH;
	}
	/*
	 * check access permissions
	 */
	if ((mem->perms & perms) < perms) {
		siw_dbg_pd(pd, "permissions 0x%08x < 0x%08x\n",
			   mem->perms, perms);
		return -E_ACCESS_PERM;
	}
	/*
	 * Check if access falls into valid memory interval.
	 */
	if (addr < mem->va || addr + len > mem->va + mem->len) {
		siw_dbg_pd(pd, "MEM interval len %d\n", len);
		siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
			   (void *)(uintptr_t)addr,
			   (void *)(uintptr_t)(addr + len));
		siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
			   (void *)(uintptr_t)mem->va,
			   (void *)(uintptr_t)(mem->va + mem->len),
			   mem->stag);

		return -E_BASE_BOUNDS;
	}
	return E_ACCESS_OK;
}

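/*
 * Example: for an MR with va = 0x10000 and len = 0x2000, an access at
 * addr = 0x11800 with len = 0x1000 fails the interval check because
 * addr + len = 0x12800 exceeds va + len = 0x12000, giving -E_BASE_BOUNDS.
 */
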
/*
 * siw_check_sge()
 *
 * Check SGE for access rights in given interval
 *
 * @pd:		Protection Domain memory should belong to
 * @sge:	SGE to be checked
 * @mem:	location of memory reference within array
 * @perms:	requested access permissions
 * @off:	starting offset in SGE
 * @len:	len of memory interval to be checked
 *
 * NOTE: Function references SGE's memory object (mem->obj)
 * if not yet done. New reference is kept if check went ok and
 * released if check failed. If mem->obj is already valid, no new
 * lookup is being done and mem is not released if check fails.
 */
int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge, struct siw_mem *mem[],
		  enum ib_access_flags perms, u32 off, int len)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mem *new = NULL;
	int rv = E_ACCESS_OK;

	if (len + off > sge->length) {
		rv = -E_BASE_BOUNDS;
		goto fail;
	}
	if (*mem == NULL) {
		new = siw_mem_id2obj(sdev, sge->lkey >> 8);
		if (unlikely(!new)) {
			siw_dbg_pd(pd, "STag unknown: 0x%08x\n", sge->lkey);
			rv = -E_STAG_INVALID;
			goto fail;
		}
		*mem = new;
	}
	/* Check if user re-registered with different STag key */
	if (unlikely((*mem)->stag != sge->lkey)) {
		siw_dbg_mem((*mem), "STag mismatch: 0x%08x\n", sge->lkey);
		rv = -E_STAG_INVALID;
		goto fail;
	}
	rv = siw_check_mem(pd, *mem, sge->laddr + off, perms, len);
	if (unlikely(rv))
		goto fail;

	return 0;

fail:
	if (new) {
		*mem = NULL;
		siw_mem_put(new);
	}
	return rv;
}

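/*
 * The mem[] argument lets callers cache the resolved object: the first
 * check of an SGE fills the slot, and later checks of the same SGE skip
 * the xarray lookup as long as the cached STag still matches sge->lkey.
 */
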
void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op)
{
	switch (op) {
	case SIW_OP_SEND:
	case SIW_OP_WRITE:
	case SIW_OP_SEND_WITH_IMM:
	case SIW_OP_SEND_REMOTE_INV:
	case SIW_OP_READ:
	case SIW_OP_READ_LOCAL_INV:
		if (!(wqe->sqe.flags & SIW_WQE_INLINE))
			siw_unref_mem_sgl(wqe->mem, wqe->sqe.num_sge);
		break;

	case SIW_OP_RECEIVE:
		siw_unref_mem_sgl(wqe->mem, wqe->rqe.num_sge);
		break;

	case SIW_OP_READ_RESPONSE:
		siw_unref_mem_sgl(wqe->mem, 1);
		break;

	default:
		/*
		 * SIW_OP_INVAL_STAG and SIW_OP_REG_MR
		 * do not hold memory references
		 */
		break;
	}
}

int siw_invalidate_stag(struct ib_pd *pd, u32 stag)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mem *mem = siw_mem_id2obj(sdev, stag >> 8);
	int rv = 0;

	if (unlikely(!mem)) {
		siw_dbg_pd(pd, "STag 0x%08x unknown\n", stag);
		return -EINVAL;
	}
	if (unlikely(mem->pd != pd)) {
		siw_dbg_pd(pd, "PD mismatch for STag 0x%08x\n", stag);
		rv = -EACCES;
		goto out;
	}
	/*
	 * Per RDMA verbs definition, an STag may already be in invalid
	 * state if invalidation is requested. So no state check here.
	 */
	mem->stag_valid = 0;

	siw_dbg_pd(pd, "STag 0x%08x now invalid\n", stag);
out:
	siw_mem_put(mem);
	return rv;
}

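/*
 * Invalidation only clears stag_valid; the memory object stays registered
 * in the xarray and may later be made valid again (e.g. by a subsequent
 * fast-register), which is distinct from deregistration via
 * siw_mr_drop_mem().
 */
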
/*
 * Gets physical address backed by PBL element. Address is referenced
 * by linear byte offset into list of variably sized PB elements.
 * Optionally, provides remaining len within current element, and
 * current PBL index for later resume at same element.
 */
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
{
	int i = idx ? *idx : 0;

	while (i < pbl->num_buf) {
		struct siw_pble *pble = &pbl->pbe[i];

		if (pble->pbl_off + pble->size > off) {
			u64 pble_off = off - pble->pbl_off;

			if (len)
				*len = pble->size - pble_off;
			if (idx)
				*idx = i;

			return pble->addr + pble_off;
		}
		i++;
	}
	if (len)
		*len = 0;
	return 0;
}

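/*
 * Example: with pbe[0] = { .addr = A, .pbl_off = 0, .size = 0x1000 } and
 * pbe[1] = { .addr = B, .pbl_off = 0x1000, .size = 0x2000 }, an offset of
 * 0x1800 resolves to B + 0x800, with *len = 0x1800 and *idx = 1.
 */
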
struct siw_pbl *siw_pbl_alloc(u32 num_buf)
{
	struct siw_pbl *pbl;
	int buf_size = sizeof(*pbl);

	if (num_buf == 0)
		return ERR_PTR(-EINVAL);

	buf_size += ((num_buf - 1) * sizeof(struct siw_pble));

	pbl = kzalloc(buf_size, GFP_KERNEL);
	if (!pbl)
		return ERR_PTR(-ENOMEM);

	pbl->max_buf = num_buf;

	return pbl;
}

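/*
 * struct siw_pbl declares a single siw_pble element, so the allocation
 * only adds room for (num_buf - 1) further elements; max_buf records the
 * capacity, while num_buf is filled in as entries get added later.
 */
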
struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
{
	struct siw_umem *umem;
	struct mm_struct *mm_s;
	u64 first_page_va;
	unsigned long mlock_limit;
	unsigned int foll_flags = FOLL_WRITE;
	int num_pages, num_chunks, i, rv = 0;

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (!len)
		return ERR_PTR(-EINVAL);

	first_page_va = start & PAGE_MASK;
	num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT;
	num_chunks = (num_pages >> CHUNK_SHIFT) + 1;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	mm_s = current->mm;
	umem->owning_mm = mm_s;
	umem->writable = writable;

	mmgrab(mm_s);

	if (!writable)
		foll_flags |= FOLL_FORCE;

	down_read(&mm_s->mmap_sem);

	mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
		rv = -ENOMEM;
		goto out_sem_up;
	}
	umem->fp_addr = first_page_va;

	umem->page_chunk =
		kcalloc(num_chunks, sizeof(struct siw_page_chunk), GFP_KERNEL);
	if (!umem->page_chunk) {
		rv = -ENOMEM;
		goto out_sem_up;
	}
	for (i = 0; num_pages; i++) {
		int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);

		umem->page_chunk[i].plist =
			kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
		if (!umem->page_chunk[i].plist) {
			rv = -ENOMEM;
			goto out_sem_up;
		}
		got = 0;
		while (nents) {
			struct page **plist = &umem->page_chunk[i].plist[got];

			rv = get_user_pages(first_page_va, nents,
					    foll_flags | FOLL_LONGTERM,
					    plist, NULL);
			if (rv < 0)
				goto out_sem_up;

			umem->num_pages += rv;
			atomic64_add(rv, &mm_s->pinned_vm);
			first_page_va += rv * PAGE_SIZE;
			nents -= rv;
			got += rv;
		}
		num_pages -= got;
	}
out_sem_up:
	up_read(&mm_s->mmap_sem);

	if (rv > 0)
		return umem;

	siw_umem_release(umem, false);

	return ERR_PTR(rv);
}

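/*
 * Pinning proceeds in chunks of at most PAGES_PER_CHUNK pages; on any
 * failure the partially built umem is torn down via siw_umem_release(),
 * which also returns whatever was already charged to pinned_vm.
 */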