/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
	       umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		/* Wait until all page fault handlers using the mr complete. */
		synchronize_srcu(&dev->mr_srcu);

	return err;
}
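/*
 * Note: cache entries are indexed by page order relative to the smallest
 * cached order, so order2idx() below is a simple offset.  For example, if
 * ent[0].order is 2, a request of order 5 maps to cache index 3; orders
 * below the smallest entry are clamped to index 0.
 */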
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
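/*
 * Rationale for use_umr_mtt_update() above: a cached mkey of a given order
 * maps (1 << order) adapter pages of MLX5_ADAPTER_PAGE_SIZE (4KB) each.  The
 * existing translation table can be reused via UMR only if the new region,
 * including the offset of 'start' inside its first adapter page, still fits
 * within that span; otherwise the mkey has to be replaced.
 */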
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (is_odp_mr(mr)) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before we start to
		 * handle invalidations.
		 */
		smp_wmb();
		to_ib_umem_odp(mr->umem)->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * a page fault could be handled and an invalidation
		 * could run before umem->odp_data->private == mr is
		 * visible to the invalidation handler.
		 */
		smp_wmb();
	}
}
123 static void reg_mr_callback(int status, struct mlx5_async_work *context)
125 struct mlx5_ib_mr *mr =
126 container_of(context, struct mlx5_ib_mr, cb_work);
127 struct mlx5_ib_dev *dev = mr->dev;
128 struct mlx5_mr_cache *cache = &dev->cache;
129 int c = order2idx(dev, mr->order);
130 struct mlx5_cache_ent *ent = &cache->ent[c];
133 struct xarray *mkeys = &dev->mdev->priv.mkey_table;
136 spin_lock_irqsave(&ent->lock, flags);
138 spin_unlock_irqrestore(&ent->lock, flags);
140 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
143 mod_timer(&dev->delay_timer, jiffies + HZ);
147 mr->mmkey.type = MLX5_MKEY_MR;
148 spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
149 key = dev->mdev->priv.mkey_key++;
150 spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
151 mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;
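	/*
	 * The mkey above is composed of the mkey index returned by the
	 * firmware (shifted into the upper bytes by mlx5_idx_to_mkey()) and
	 * a per-device rolling value (priv.mkey_key), so that a reused index
	 * still produces a different key value.
	 */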
153 cache->last_add = jiffies;
155 spin_lock_irqsave(&ent->lock, flags);
156 list_add_tail(&mr->list, &ent->head);
159 spin_unlock_irqrestore(&ent->lock, flags);
161 xa_lock_irqsave(mkeys, flags);
162 err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
163 &mr->mmkey, GFP_ATOMIC));
164 xa_unlock_irqrestore(mkeys, flags);
166 pr_err("Error inserting to mkey tree. 0x%x\n", -err);
168 if (!completion_done(&ent->compl))
169 complete(&ent->compl);
172 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
174 struct mlx5_mr_cache *cache = &dev->cache;
175 struct mlx5_cache_ent *ent = &cache->ent[c];
176 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
177 struct mlx5_ib_mr *mr;
183 in = kzalloc(inlen, GFP_KERNEL);
187 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
188 for (i = 0; i < num; i++) {
189 if (ent->pending >= MAX_PENDING_REG_MR) {
194 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
199 mr->order = ent->order;
200 mr->allocated_from_cache = 1;
203 MLX5_SET(mkc, mkc, free, 1);
204 MLX5_SET(mkc, mkc, umr_en, 1);
205 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
206 MLX5_SET(mkc, mkc, access_mode_4_2,
207 (ent->access_mode >> 2) & 0x7);
209 MLX5_SET(mkc, mkc, qpn, 0xffffff);
210 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
211 MLX5_SET(mkc, mkc, log_page_size, ent->page);
213 spin_lock_irq(&ent->lock);
215 spin_unlock_irq(&ent->lock);
216 err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
217 &dev->async_ctx, in, inlen,
218 mr->out, sizeof(mr->out),
219 reg_mr_callback, &mr->cb_work);
221 spin_lock_irq(&ent->lock);
223 spin_unlock_irq(&ent->lock);
224 mlx5_ib_warn(dev, "create mkey failed %d\n", err);
234 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
236 struct mlx5_mr_cache *cache = &dev->cache;
237 struct mlx5_cache_ent *ent = &cache->ent[c];
238 struct mlx5_ib_mr *tmp_mr;
239 struct mlx5_ib_mr *mr;
243 for (i = 0; i < num; i++) {
244 spin_lock_irq(&ent->lock);
245 if (list_empty(&ent->head)) {
246 spin_unlock_irq(&ent->lock);
249 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
250 list_move(&mr->list, &del_list);
253 spin_unlock_irq(&ent->lock);
254 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
257 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
258 synchronize_srcu(&dev->mr_srcu);
260 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
266 static ssize_t size_write(struct file *filp, const char __user *buf,
267 size_t count, loff_t *pos)
269 struct mlx5_cache_ent *ent = filp->private_data;
270 struct mlx5_ib_dev *dev = ent->dev;
276 count = min(count, sizeof(lbuf) - 1);
277 if (copy_from_user(lbuf, buf, count))
280 c = order2idx(dev, ent->order);
282 if (sscanf(lbuf, "%u", &var) != 1)
285 if (var < ent->limit)
288 if (var > ent->size) {
290 err = add_keys(dev, c, var - ent->size);
291 if (err && err != -EAGAIN)
294 usleep_range(3000, 5000);
296 } else if (var < ent->size) {
297 remove_keys(dev, c, ent->size - var);
303 static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
306 struct mlx5_cache_ent *ent = filp->private_data;
310 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
314 return simple_read_from_buffer(buf, count, pos, lbuf, err);
317 static const struct file_operations size_fops = {
318 .owner = THIS_MODULE,
324 static ssize_t limit_write(struct file *filp, const char __user *buf,
325 size_t count, loff_t *pos)
327 struct mlx5_cache_ent *ent = filp->private_data;
328 struct mlx5_ib_dev *dev = ent->dev;
334 count = min(count, sizeof(lbuf) - 1);
335 if (copy_from_user(lbuf, buf, count))
338 c = order2idx(dev, ent->order);
340 if (sscanf(lbuf, "%u", &var) != 1)
348 if (ent->cur < ent->limit) {
349 err = add_keys(dev, c, 2 * ent->limit - ent->cur);
357 static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
360 struct mlx5_cache_ent *ent = filp->private_data;
364 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
368 return simple_read_from_buffer(buf, count, pos, lbuf, err);
371 static const struct file_operations limit_fops = {
372 .owner = THIS_MODULE,
374 .write = limit_write,
378 static int someone_adding(struct mlx5_mr_cache *cache)
382 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
383 if (cache->ent[i].cur < cache->ent[i].limit)
390 static void __cache_work_func(struct mlx5_cache_ent *ent)
392 struct mlx5_ib_dev *dev = ent->dev;
393 struct mlx5_mr_cache *cache = &dev->cache;
394 int i = order2idx(dev, ent->order);
400 ent = &dev->cache.ent[i];
401 if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
402 err = add_keys(dev, i, 1);
403 if (ent->cur < 2 * ent->limit) {
404 if (err == -EAGAIN) {
405 mlx5_ib_dbg(dev, "returned eagain, order %d\n",
407 queue_delayed_work(cache->wq, &ent->dwork,
408 msecs_to_jiffies(3));
410 mlx5_ib_warn(dev, "command failed order %d, err %d\n",
412 queue_delayed_work(cache->wq, &ent->dwork,
413 msecs_to_jiffies(1000));
415 queue_work(cache->wq, &ent->work);
418 } else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run
		 * when no other active processes are running.
		 *
		 * The need_resched() call returns TRUE if there are
		 * user tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work, so that it can
		 * try to run in the next cycle and free CPU resources
		 * for other tasks.
		 */
431 if (!need_resched() && !someone_adding(cache) &&
432 time_after(jiffies, cache->last_add + 300 * HZ)) {
433 remove_keys(dev, i, 1);
434 if (ent->cur > ent->limit)
435 queue_work(cache->wq, &ent->work);
437 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
442 static void delayed_cache_work_func(struct work_struct *work)
444 struct mlx5_cache_ent *ent;
446 ent = container_of(work, struct mlx5_cache_ent, dwork.work);
447 __cache_work_func(ent);
450 static void cache_work_func(struct work_struct *work)
452 struct mlx5_cache_ent *ent;
454 ent = container_of(work, struct mlx5_cache_ent, work);
455 __cache_work_func(ent);
458 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
460 struct mlx5_mr_cache *cache = &dev->cache;
461 struct mlx5_cache_ent *ent;
462 struct mlx5_ib_mr *mr;
465 if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
466 mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
470 ent = &cache->ent[entry];
472 spin_lock_irq(&ent->lock);
473 if (list_empty(&ent->head)) {
474 spin_unlock_irq(&ent->lock);
476 err = add_keys(dev, entry, 1);
477 if (err && err != -EAGAIN)
480 wait_for_completion(&ent->compl);
482 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
486 spin_unlock_irq(&ent->lock);
487 if (ent->cur < ent->limit)
488 queue_work(cache->wq, &ent->work);
494 static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
496 struct mlx5_mr_cache *cache = &dev->cache;
497 struct mlx5_ib_mr *mr = NULL;
498 struct mlx5_cache_ent *ent;
499 int last_umr_cache_entry;
503 c = order2idx(dev, order);
504 last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
505 if (c < 0 || c > last_umr_cache_entry) {
506 mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
510 for (i = c; i <= last_umr_cache_entry; i++) {
511 ent = &cache->ent[i];
513 mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
515 spin_lock_irq(&ent->lock);
516 if (!list_empty(&ent->head)) {
517 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
521 spin_unlock_irq(&ent->lock);
522 if (ent->cur < ent->limit)
523 queue_work(cache->wq, &ent->work);
526 spin_unlock_irq(&ent->lock);
528 queue_work(cache->wq, &ent->work);
532 cache->ent[c].miss++;
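	/*
	 * If the exact-order entry was empty, the loop above escalates to
	 * larger cache entries (a bigger mkey can always describe a smaller
	 * region via UMR).  When nothing is found at all, a miss is recorded
	 * so the debugfs statistics reflect cache pressure.
	 */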
537 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
539 struct mlx5_mr_cache *cache = &dev->cache;
540 struct mlx5_cache_ent *ent;
544 if (!mr->allocated_from_cache)
547 c = order2idx(dev, mr->order);
548 WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
550 if (unreg_umr(dev, mr)) {
551 mr->allocated_from_cache = false;
552 destroy_mkey(dev, mr);
553 ent = &cache->ent[c];
554 if (ent->cur < ent->limit)
555 queue_work(cache->wq, &ent->work);
559 ent = &cache->ent[c];
560 spin_lock_irq(&ent->lock);
561 list_add_tail(&mr->list, &ent->head);
563 if (ent->cur > 2 * ent->limit)
565 spin_unlock_irq(&ent->lock);
568 queue_work(cache->wq, &ent->work);
571 static void clean_keys(struct mlx5_ib_dev *dev, int c)
573 struct mlx5_mr_cache *cache = &dev->cache;
574 struct mlx5_cache_ent *ent = &cache->ent[c];
575 struct mlx5_ib_mr *tmp_mr;
576 struct mlx5_ib_mr *mr;
579 cancel_delayed_work(&ent->dwork);
581 spin_lock_irq(&ent->lock);
582 if (list_empty(&ent->head)) {
583 spin_unlock_irq(&ent->lock);
586 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
587 list_move(&mr->list, &del_list);
590 spin_unlock_irq(&ent->lock);
591 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
594 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
595 synchronize_srcu(&dev->mr_srcu);
598 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
604 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
606 if (!mlx5_debugfs_root || dev->is_rep)
609 debugfs_remove_recursive(dev->cache.root);
610 dev->cache.root = NULL;
613 static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
615 struct mlx5_mr_cache *cache = &dev->cache;
616 struct mlx5_cache_ent *ent;
620 if (!mlx5_debugfs_root || dev->is_rep)
623 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
625 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
626 ent = &cache->ent[i];
627 sprintf(ent->name, "%d", ent->order);
628 dir = debugfs_create_dir(ent->name, cache->root);
629 debugfs_create_file("size", 0600, dir, ent, &size_fops);
630 debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
631 debugfs_create_u32("cur", 0400, dir, &ent->cur);
632 debugfs_create_u32("miss", 0600, dir, &ent->miss);
636 static void delay_time_func(struct timer_list *t)
638 struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
643 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
645 struct mlx5_mr_cache *cache = &dev->cache;
646 struct mlx5_cache_ent *ent;
649 mutex_init(&dev->slow_path_mutex);
650 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
652 mlx5_ib_warn(dev, "failed to create work queue\n");
656 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
657 timer_setup(&dev->delay_timer, delay_time_func, 0);
658 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
659 ent = &cache->ent[i];
660 INIT_LIST_HEAD(&ent->head);
661 spin_lock_init(&ent->lock);
666 init_completion(&ent->compl);
667 INIT_WORK(&ent->work, cache_work_func);
668 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
670 if (i > MR_CACHE_LAST_STD_ENTRY) {
671 mlx5_odp_init_mr_cache_entry(ent);
675 if (ent->order > mr_cache_max_order(dev))
678 ent->page = PAGE_SHIFT;
679 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
680 MLX5_IB_UMR_OCTOWORD;
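		/*
		 * Rough arithmetic: a cache entry of order N needs (1 << N)
		 * MTT entries of sizeof(struct mlx5_mtt) (8) bytes each, and
		 * translations_octword_size is expressed in 16-byte octowords
		 * (MLX5_IB_UMR_OCTOWORD).  E.g. order 10 -> 1024 * 8 / 16 =
		 * 512 octowords.
		 */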
681 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
682 if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
684 mlx5_core_is_pf(dev->mdev))
685 ent->limit = dev->mdev->profile->mr_cache[i].limit;
688 queue_work(cache->wq, &ent->work);
691 mlx5_mr_cache_debugfs_init(dev);
696 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
703 dev->cache.stopped = 1;
704 flush_workqueue(dev->cache.wq);
706 mlx5_mr_cache_debugfs_cleanup(dev);
707 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
709 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
712 destroy_workqueue(dev->cache.wq);
713 del_timer_sync(&dev->delay_timer);
718 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
720 struct mlx5_ib_dev *dev = to_mdev(pd->device);
721 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
722 struct mlx5_core_dev *mdev = dev->mdev;
723 struct mlx5_ib_mr *mr;
728 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
730 return ERR_PTR(-ENOMEM);
732 in = kzalloc(inlen, GFP_KERNEL);
738 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
740 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
741 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
742 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
743 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
744 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
745 MLX5_SET(mkc, mkc, lr, 1);
747 MLX5_SET(mkc, mkc, length64, 1);
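	/*
	 * length64 makes this mkey cover the whole 64-bit address space in
	 * PA (physical address) mode, which is what a DMA MR for local
	 * access needs; start_addr stays 0 below.
	 */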
748 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
749 MLX5_SET(mkc, mkc, qpn, 0xffffff);
750 MLX5_SET64(mkc, mkc, start_addr, 0);
752 err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
757 mr->mmkey.type = MLX5_MKEY_MR;
758 mr->ibmr.lkey = mr->mmkey.key;
759 mr->ibmr.rkey = mr->mmkey.key;
773 static int get_octo_len(u64 addr, u64 len, int page_shift)
775 u64 page_size = 1ULL << page_shift;
779 offset = addr & (page_size - 1);
780 npages = ALIGN(len + offset, page_size) >> page_shift;
781 return (npages + 1) / 2;
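/*
 * Each 16-byte octoword holds two 8-byte MTT pointers, hence the final
 * (npages + 1) / 2 rounding.  E.g. a 3-page mapping needs 2 octowords.
 */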
784 static int mr_cache_max_order(struct mlx5_ib_dev *dev)
786 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
787 return MR_CACHE_LAST_STD_ENTRY + 2;
788 return MLX5_MAX_UMR_SHIFT;
791 static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
792 u64 start, u64 length, int access_flags,
793 struct ib_umem **umem, int *npages, int *page_shift,
794 int *ncont, int *order)
801 u = ib_umem_get(udata, start, length, access_flags, 0);
802 err = PTR_ERR_OR_ZERO(u);
804 mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
808 mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
809 page_shift, ncont, order);
811 mlx5_ib_warn(dev, "avoid zero region\n");
818 mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
819 *npages, *ncont, *order, *page_shift);
824 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
826 struct mlx5_ib_umr_context *context =
827 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
829 context->status = wc->status;
830 complete(&context->done);
833 static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
835 context->cqe.done = mlx5_ib_umr_done;
836 context->status = -1;
837 init_completion(&context->done);
840 static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
841 struct mlx5_umr_wr *umrwr)
843 struct umr_common *umrc = &dev->umrc;
844 const struct ib_send_wr *bad;
846 struct mlx5_ib_umr_context umr_context;
848 mlx5_ib_init_umr_context(&umr_context);
849 umrwr->wr.wr_cqe = &umr_context.cqe;
852 err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
854 mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
856 wait_for_completion(&umr_context.done);
857 if (umr_context.status != IB_WC_SUCCESS) {
858 mlx5_ib_warn(dev, "reg umr failed (%u)\n",
867 static struct mlx5_ib_mr *alloc_mr_from_cache(
868 struct ib_pd *pd, struct ib_umem *umem,
869 u64 virt_addr, u64 len, int npages,
870 int page_shift, int order, int access_flags)
872 struct mlx5_ib_dev *dev = to_mdev(pd->device);
873 struct mlx5_ib_mr *mr;
877 for (i = 0; i < 1; i++) {
878 mr = alloc_cached_mr(dev, order);
882 err = add_keys(dev, order2idx(dev, order), 1);
883 if (err && err != -EAGAIN) {
884 mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
890 return ERR_PTR(-EAGAIN);
894 mr->access_flags = access_flags;
895 mr->desc_size = sizeof(struct mlx5_mtt);
896 mr->mmkey.iova = virt_addr;
897 mr->mmkey.size = len;
898 mr->mmkey.pd = to_mpd(pd)->pdn;
903 static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
904 void *xlt, int page_shift, size_t size,
907 struct mlx5_ib_dev *dev = mr->dev;
908 struct ib_umem *umem = mr->umem;
910 if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
911 if (!umr_can_use_indirect_mkey(dev))
913 mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
917 npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
919 if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
920 __mlx5_ib_populate_pas(dev, umem, page_shift,
922 MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
926 memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
927 size - npages * sizeof(struct mlx5_mtt));
933 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
934 MLX5_UMR_MTT_ALIGNMENT)
935 #define MLX5_SPARE_UMR_CHUNK 0x10000
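/*
 * MLX5_MAX_UMR_CHUNK bounds the size of the XLT scratch buffer posted in a
 * single UMR work request, and MLX5_SPARE_UMR_CHUNK (64KB) is the smaller
 * allocation mlx5_ib_update_xlt() falls back to when the high-order page
 * allocation fails; as a last resort it borrows the per-driver emergency
 * page.
 */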
937 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
938 int page_shift, int flags)
940 struct mlx5_ib_dev *dev = mr->dev;
941 struct device *ddev = dev->ib_dev.dev.parent;
945 struct mlx5_umr_wr wr;
948 int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
949 ? sizeof(struct mlx5_klm)
950 : sizeof(struct mlx5_mtt);
951 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
952 const int page_mask = page_align - 1;
953 size_t pages_mapped = 0;
954 size_t pages_to_map = 0;
955 size_t pages_iter = 0;
957 bool use_emergency_page = false;
959 if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
960 !umr_can_use_indirect_mkey(dev))
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly.
	 */
966 if (idx & page_mask) {
967 npages += idx & page_mask;
971 gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
972 gfp |= __GFP_ZERO | __GFP_NOWARN;
974 pages_to_map = ALIGN(npages, page_align);
975 size = desc_size * pages_to_map;
976 size = min_t(int, size, MLX5_MAX_UMR_CHUNK);
978 xlt = (void *)__get_free_pages(gfp, get_order(size));
979 if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
980 mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation od %d bytes\n",
981 size, get_order(size), MLX5_SPARE_UMR_CHUNK);
983 size = MLX5_SPARE_UMR_CHUNK;
984 xlt = (void *)__get_free_pages(gfp, get_order(size));
988 mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
989 xlt = (void *)mlx5_ib_get_xlt_emergency_page();
991 memset(xlt, 0, size);
992 use_emergency_page = true;
994 pages_iter = size / desc_size;
995 dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
996 if (dma_mapping_error(ddev, dma)) {
997 mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
1003 sg.lkey = dev->umrc.pd->local_dma_lkey;
1005 memset(&wr, 0, sizeof(wr));
1006 wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
1007 if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
1008 wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1009 wr.wr.sg_list = &sg;
1011 wr.wr.opcode = MLX5_IB_WR_UMR;
1013 wr.pd = mr->ibmr.pd;
1014 wr.mkey = mr->mmkey.key;
1015 wr.length = mr->mmkey.size;
1016 wr.virt_addr = mr->mmkey.iova;
1017 wr.access_flags = mr->access_flags;
1018 wr.page_shift = page_shift;
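	/*
	 * The loop below walks the region in chunks of pages_iter
	 * translation entries: each iteration fills the XLT buffer via
	 * populate_xlt() and posts one UMR work request; only the final
	 * chunk carries the ENABLE/PD/ACCESS/TRANSLATION update flags.
	 */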
1020 for (pages_mapped = 0;
1021 pages_mapped < pages_to_map && !err;
1022 pages_mapped += pages_iter, idx += pages_iter) {
1023 npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
1024 dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
1025 npages = populate_xlt(mr, idx, npages, xlt,
1026 page_shift, size, flags);
1028 dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
1030 sg.length = ALIGN(npages * desc_size,
1031 MLX5_UMR_MTT_ALIGNMENT);
1033 if (pages_mapped + pages_iter >= pages_to_map) {
1034 if (flags & MLX5_IB_UPD_XLT_ENABLE)
1036 MLX5_IB_SEND_UMR_ENABLE_MR |
1037 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
1038 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1039 if (flags & MLX5_IB_UPD_XLT_PD ||
1040 flags & MLX5_IB_UPD_XLT_ACCESS)
1042 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1043 if (flags & MLX5_IB_UPD_XLT_ADDR)
1045 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1048 wr.offset = idx * desc_size;
1049 wr.xlt_size = sg.length;
1051 err = mlx5_ib_post_send_wait(dev, &wr);
1053 dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
1056 if (use_emergency_page)
1057 mlx5_ib_put_xlt_emergency_page();
1059 free_pages((unsigned long)xlt, get_order(size));
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
1068 static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
1069 u64 virt_addr, u64 length,
1070 struct ib_umem *umem, int npages,
1071 int page_shift, int access_flags,
1074 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1075 struct mlx5_ib_mr *mr;
1081 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
1083 mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
1085 return ERR_PTR(-ENOMEM);
1088 mr->access_flags = access_flags;
1090 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1092 inlen += sizeof(*pas) * roundup(npages, 2);
1093 in = kvzalloc(inlen, GFP_KERNEL);
1098 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
1099 if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
1100 mlx5_ib_populate_pas(dev, umem, page_shift, pas,
1101 pg_cap ? MLX5_IB_MTT_PRESENT : 0);
1103 /* The pg_access bit allows setting the access flags
1104 * in the page list submitted with the command. */
1105 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
1107 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1108 MLX5_SET(mkc, mkc, free, !populate);
1109 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
1110 MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
1111 MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
1112 MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
1113 MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
1114 MLX5_SET(mkc, mkc, lr, 1);
1115 MLX5_SET(mkc, mkc, umr_en, 1);
1117 MLX5_SET64(mkc, mkc, start_addr, virt_addr);
1118 MLX5_SET64(mkc, mkc, len, length);
1119 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1120 MLX5_SET(mkc, mkc, bsf_octword_size, 0);
1121 MLX5_SET(mkc, mkc, translations_octword_size,
1122 get_octo_len(virt_addr, length, page_shift));
1123 MLX5_SET(mkc, mkc, log_page_size, page_shift);
1124 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1126 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
1127 get_octo_len(virt_addr, length, page_shift));
1130 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
1132 mlx5_ib_warn(dev, "create mkey failed\n");
1135 mr->mmkey.type = MLX5_MKEY_MR;
1136 mr->desc_size = sizeof(struct mlx5_mtt);
1140 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
1151 return ERR_PTR(err);
1154 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
1155 int npages, u64 length, int access_flags)
1157 mr->npages = npages;
1158 atomic_add(npages, &dev->mdev->priv.reg_pages);
1159 mr->ibmr.lkey = mr->mmkey.key;
1160 mr->ibmr.rkey = mr->mmkey.key;
1161 mr->ibmr.length = length;
1162 mr->access_flags = access_flags;
1165 static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
1166 u64 length, int acc, int mode)
1168 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1169 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1170 struct mlx5_core_dev *mdev = dev->mdev;
1171 struct mlx5_ib_mr *mr;
1176 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1178 return ERR_PTR(-ENOMEM);
1180 in = kzalloc(inlen, GFP_KERNEL);
1186 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1188 MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
1189 MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
1190 MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
1191 MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
1192 MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
1193 MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
1194 MLX5_SET(mkc, mkc, lr, 1);
1196 MLX5_SET64(mkc, mkc, len, length);
1197 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1198 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1199 MLX5_SET64(mkc, mkc, start_addr, start_addr);
1201 err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
1208 set_mr_fields(dev, mr, 0, length, acc);
1218 return ERR_PTR(err);
1221 int mlx5_ib_advise_mr(struct ib_pd *pd,
1222 enum ib_uverbs_advise_mr_advice advice,
1224 struct ib_sge *sg_list,
1226 struct uverbs_attr_bundle *attrs)
1228 if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
1229 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
1232 return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
1236 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
1237 struct ib_dm_mr_attr *attr,
1238 struct uverbs_attr_bundle *attrs)
1240 struct mlx5_ib_dm *mdm = to_mdm(dm);
1241 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
1242 u64 start_addr = mdm->dev_addr + attr->offset;
1245 switch (mdm->type) {
1246 case MLX5_IB_UAPI_DM_TYPE_MEMIC:
1247 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
1248 return ERR_PTR(-EINVAL);
1250 mode = MLX5_MKC_ACCESS_MODE_MEMIC;
1251 start_addr -= pci_resource_start(dev->pdev, 0);
1253 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
1254 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
1255 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
1256 return ERR_PTR(-EINVAL);
1258 mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
1261 return ERR_PTR(-EINVAL);
1264 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
1265 attr->access_flags, mode);
1268 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1269 u64 virt_addr, int access_flags,
1270 struct ib_udata *udata)
1272 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1273 struct mlx5_ib_mr *mr = NULL;
1274 bool populate_mtts = false;
1275 struct ib_umem *umem;
1282 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
1283 return ERR_PTR(-EOPNOTSUPP);
1285 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1286 start, virt_addr, length, access_flags);
1288 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
1289 length == U64_MAX) {
1290 if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
1291 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1292 return ERR_PTR(-EINVAL);
1294 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
1296 return ERR_CAST(mr);
1300 err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
1301 &npages, &page_shift, &ncont, &order);
1304 return ERR_PTR(err);
1306 if (use_umr(dev, order)) {
1307 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
1308 page_shift, order, access_flags);
1309 if (PTR_ERR(mr) == -EAGAIN) {
1310 mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
1313 populate_mtts = false;
1314 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
1315 if (access_flags & IB_ACCESS_ON_DEMAND) {
1317 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
1320 populate_mtts = true;
1324 if (!umr_can_modify_entity_size(dev))
1325 populate_mtts = true;
1326 mutex_lock(&dev->slow_path_mutex);
1327 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
1328 page_shift, access_flags, populate_mtts);
1329 mutex_unlock(&dev->slow_path_mutex);
1337 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1340 set_mr_fields(dev, mr, npages, length, access_flags);
1344 if (!populate_mtts) {
1345 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
1347 if (access_flags & IB_ACCESS_ON_DEMAND)
1348 update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
1350 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1355 return ERR_PTR(err);
1359 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1361 atomic_set(&mr->num_pending_prefetch, 0);
1366 ib_umem_release(umem);
1367 return ERR_PTR(err);
1370 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1372 struct mlx5_core_dev *mdev = dev->mdev;
1373 struct mlx5_umr_wr umrwr = {};
1375 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1378 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1379 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1380 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1381 umrwr.pd = dev->umrc.pd;
1382 umrwr.mkey = mr->mmkey.key;
1383 umrwr.ignore_free_state = 1;
1385 return mlx5_ib_post_send_wait(dev, &umrwr);
1388 static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1389 int access_flags, int flags)
1391 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1392 struct mlx5_umr_wr umrwr = {};
1395 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1397 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1398 umrwr.mkey = mr->mmkey.key;
1400 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
1402 umrwr.access_flags = access_flags;
1403 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1406 err = mlx5_ib_post_send_wait(dev, &umrwr);
1411 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1412 u64 length, u64 virt_addr, int new_access_flags,
1413 struct ib_pd *new_pd, struct ib_udata *udata)
1415 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1416 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1417 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
1418 int access_flags = flags & IB_MR_REREG_ACCESS ?
1429 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1430 start, virt_addr, length, access_flags);
1432 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
1437 if (flags & IB_MR_REREG_TRANS) {
1441 addr = mr->umem->address;
1442 len = mr->umem->length;
1445 if (flags != IB_MR_REREG_PD) {
		/* Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
1451 ib_umem_release(mr->umem);
1453 err = mr_umem_get(dev, udata, addr, len, access_flags,
1454 &mr->umem, &npages, &page_shift, &ncont,
1460 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
1462 * UMR can't be used - MKey needs to be replaced.
1464 if (mr->allocated_from_cache)
1465 err = unreg_umr(dev, mr);
1467 err = destroy_mkey(dev, mr);
1471 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
1472 page_shift, access_flags, true);
1480 mr->allocated_from_cache = 0;
1481 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1488 mr->access_flags = access_flags;
1489 mr->mmkey.iova = addr;
1490 mr->mmkey.size = len;
1491 mr->mmkey.pd = to_mpd(pd)->pdn;
1493 if (flags & IB_MR_REREG_TRANS) {
1494 upd_flags = MLX5_IB_UPD_XLT_ADDR;
1495 if (flags & IB_MR_REREG_PD)
1496 upd_flags |= MLX5_IB_UPD_XLT_PD;
1497 if (flags & IB_MR_REREG_ACCESS)
1498 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1499 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1502 err = rereg_umr(pd, mr, access_flags, flags);
1509 set_mr_fields(dev, mr, npages, len, access_flags);
1515 ib_umem_release(mr->umem);
1523 mlx5_alloc_priv_descs(struct ib_device *device,
1524 struct mlx5_ib_mr *mr,
1528 int size = ndescs * desc_size;
1532 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1534 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1535 if (!mr->descs_alloc)
1538 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1540 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
1541 size, DMA_TO_DEVICE);
1542 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
1549 kfree(mr->descs_alloc);
1555 mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1558 struct ib_device *device = mr->ibmr.device;
1559 int size = mr->max_descs * mr->desc_size;
1561 dma_unmap_single(device->dev.parent, mr->desc_map,
1562 size, DMA_TO_DEVICE);
1563 kfree(mr->descs_alloc);
1568 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1570 int allocated_from_cache = mr->allocated_from_cache;
1573 if (mlx5_core_destroy_psv(dev->mdev,
1574 mr->sig->psv_memory.psv_idx))
1575 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1576 mr->sig->psv_memory.psv_idx);
1577 if (mlx5_core_destroy_psv(dev->mdev,
1578 mr->sig->psv_wire.psv_idx))
1579 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1580 mr->sig->psv_wire.psv_idx);
1585 if (!allocated_from_cache) {
1586 destroy_mkey(dev, mr);
1587 mlx5_free_priv_descs(mr);
1591 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1593 int npages = mr->npages;
1594 struct ib_umem *umem = mr->umem;
1596 if (is_odp_mr(mr)) {
1597 struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
		/* Prevent new page faults and
		 * prefetch requests from succeeding.
		 */
1604 /* dequeue pending prefetch requests for the mr */
1605 if (atomic_read(&mr->num_pending_prefetch))
1606 flush_workqueue(system_unbound_wq);
1607 WARN_ON(atomic_read(&mr->num_pending_prefetch));
1609 /* Wait for all running page-fault handlers to finish. */
1610 synchronize_srcu(&dev->mr_srcu);
1611 /* Destroy all page mappings */
1612 if (umem_odp->page_list)
1613 mlx5_ib_invalidate_range(umem_odp,
1614 ib_umem_start(umem_odp),
1615 ib_umem_end(umem_odp));
1617 mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP, so that there
		 * will not be any invalidations in flight that still look
		 * at the *mr struct.
		 */
1623 ib_umem_release(umem);
1624 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1626 /* Avoid double-freeing the umem. */
	/*
	 * We should unregister the DMA address from the HCA before
	 * we remove the DMA mapping.
	 */
1636 mlx5_mr_cache_free(dev, mr);
1637 ib_umem_release(umem);
1639 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1641 if (!mr->allocated_from_cache)
1645 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1647 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1649 if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1650 dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
1651 dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
1654 dereg_mr(to_mdev(ibmr->device), mmr);
1659 static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
1660 int access_mode, int page_shift)
1664 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1666 MLX5_SET(mkc, mkc, free, 1);
1667 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1668 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1669 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1670 MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
1671 MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
1672 MLX5_SET(mkc, mkc, umr_en, 1);
1673 MLX5_SET(mkc, mkc, log_page_size, page_shift);
1676 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1677 int ndescs, int desc_size, int page_shift,
1678 int access_mode, u32 *in, int inlen)
1680 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1683 mr->access_mode = access_mode;
1684 mr->desc_size = desc_size;
1685 mr->max_descs = ndescs;
1687 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
1691 mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
1693 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
1695 goto err_free_descs;
1697 mr->mmkey.type = MLX5_MKEY_MR;
1698 mr->ibmr.lkey = mr->mmkey.key;
1699 mr->ibmr.rkey = mr->mmkey.key;
1704 mlx5_free_priv_descs(mr);
1708 static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
1709 u32 max_num_sg, u32 max_num_meta_sg,
1710 int desc_size, int access_mode)
1712 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1713 int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
1715 struct mlx5_ib_mr *mr;
1719 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1721 return ERR_PTR(-ENOMEM);
1724 mr->ibmr.device = pd->device;
1726 in = kzalloc(inlen, GFP_KERNEL);
1732 if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
1733 page_shift = PAGE_SHIFT;
1735 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
1736 access_mode, in, inlen);
1749 return ERR_PTR(err);
1752 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1753 int ndescs, u32 *in, int inlen)
1755 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
1756 PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
1760 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1761 int ndescs, u32 *in, int inlen)
1763 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
1764 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1767 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1768 int max_num_sg, int max_num_meta_sg,
1771 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1776 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1780 /* create mem & wire PSVs */
1781 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
1785 mr->sig->psv_memory.psv_idx = psv_index[0];
1786 mr->sig->psv_wire.psv_idx = psv_index[1];
1788 mr->sig->sig_status_checked = true;
1789 mr->sig->sig_err_exists = false;
1790 /* Next UMR, Arm SIGERR */
1791 ++mr->sig->sigerr_count;
1792 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1793 sizeof(struct mlx5_klm),
1794 MLX5_MKC_ACCESS_MODE_KLMS);
1795 if (IS_ERR(mr->klm_mr)) {
1796 err = PTR_ERR(mr->klm_mr);
1797 goto err_destroy_psv;
1799 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1800 sizeof(struct mlx5_mtt),
1801 MLX5_MKC_ACCESS_MODE_MTT);
1802 if (IS_ERR(mr->mtt_mr)) {
1803 err = PTR_ERR(mr->mtt_mr);
1804 goto err_free_klm_mr;
1807 /* Set bsf descriptors for mkey */
1808 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1809 MLX5_SET(mkc, mkc, bsf_en, 1);
1810 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
1812 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
1813 MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
1815 goto err_free_mtt_mr;
1820 dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
1823 dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
1826 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
1827 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1828 mr->sig->psv_memory.psv_idx);
1829 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1830 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1831 mr->sig->psv_wire.psv_idx);
1838 static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
1839 enum ib_mr_type mr_type, u32 max_num_sg,
1840 u32 max_num_meta_sg)
1842 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1843 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1844 int ndescs = ALIGN(max_num_sg, 4);
1845 struct mlx5_ib_mr *mr;
1849 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1851 return ERR_PTR(-ENOMEM);
1853 in = kzalloc(inlen, GFP_KERNEL);
1859 mr->ibmr.device = pd->device;
1863 case IB_MR_TYPE_MEM_REG:
1864 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
1866 case IB_MR_TYPE_SG_GAPS:
1867 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
1869 case IB_MR_TYPE_INTEGRITY:
1870 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
1871 max_num_meta_sg, in, inlen);
1874 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1889 return ERR_PTR(err);
1892 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1893 u32 max_num_sg, struct ib_udata *udata)
1895 return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
1898 struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
1899 u32 max_num_sg, u32 max_num_meta_sg)
1901 return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
1905 struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1906 struct ib_udata *udata)
1908 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1909 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1910 struct mlx5_ib_mw *mw = NULL;
1915 struct mlx5_ib_alloc_mw req = {};
1918 __u32 response_length;
1921 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1923 return ERR_PTR(err);
1925 if (req.comp_mask || req.reserved1 || req.reserved2)
1926 return ERR_PTR(-EOPNOTSUPP);
1928 if (udata->inlen > sizeof(req) &&
1929 !ib_is_udata_cleared(udata, sizeof(req),
1930 udata->inlen - sizeof(req)))
1931 return ERR_PTR(-EOPNOTSUPP);
1933 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1935 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
1936 in = kzalloc(inlen, GFP_KERNEL);
1942 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1944 MLX5_SET(mkc, mkc, free, 1);
1945 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1946 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1947 MLX5_SET(mkc, mkc, umr_en, 1);
1948 MLX5_SET(mkc, mkc, lr, 1);
1949 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
1950 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
1951 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1953 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
1957 mw->mmkey.type = MLX5_MKEY_MW;
1958 mw->ibmw.rkey = mw->mmkey.key;
1959 mw->ndescs = ndescs;
1961 resp.response_length = min(offsetof(typeof(resp), response_length) +
1962 sizeof(resp.response_length), udata->outlen);
1963 if (resp.response_length) {
1964 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1966 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1977 return ERR_PTR(err);
1980 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1982 struct mlx5_ib_mw *mmw = to_mmw(mw);
1985 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1992 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1993 struct ib_mr_status *mr_status)
1995 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1998 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1999 pr_err("Invalid status check mask\n");
2004 mr_status->fail_status = 0;
2005 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
2008 pr_err("signature status check requested on a non-signature enabled MR\n");
2012 mmr->sig->sig_status_checked = true;
2013 if (!mmr->sig->sig_err_exists)
2016 if (ibmr->lkey == mmr->sig->err_item.key)
2017 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
2018 sizeof(mr_status->sig_err));
2020 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
2021 mr_status->sig_err.sig_err_offset = 0;
2022 mr_status->sig_err.key = mmr->sig->err_item.key;
2025 mmr->sig->sig_err_exists = false;
2026 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
2034 mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2035 int data_sg_nents, unsigned int *data_sg_offset,
2036 struct scatterlist *meta_sg, int meta_sg_nents,
2037 unsigned int *meta_sg_offset)
2039 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2040 unsigned int sg_offset = 0;
2043 mr->meta_length = 0;
2044 if (data_sg_nents == 1) {
2048 sg_offset = *data_sg_offset;
2049 mr->data_length = sg_dma_len(data_sg) - sg_offset;
2050 mr->data_iova = sg_dma_address(data_sg) + sg_offset;
2051 if (meta_sg_nents == 1) {
2053 mr->meta_ndescs = 1;
2055 sg_offset = *meta_sg_offset;
2058 mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
2059 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
2061 ibmr->length = mr->data_length + mr->meta_length;
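/*
 * The PA path above is only taken when the data (and, if present, the
 * metadata) each occupy a single DMA-contiguous SG entry; in that case the
 * buffers are described directly through the PD's local_dma_lkey and no
 * UMR-backed descriptors are required.
 */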
2068 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2069 struct scatterlist *sgl,
2070 unsigned short sg_nents,
2071 unsigned int *sg_offset_p,
2072 struct scatterlist *meta_sgl,
2073 unsigned short meta_sg_nents,
2074 unsigned int *meta_sg_offset_p)
2076 struct scatterlist *sg = sgl;
2077 struct mlx5_klm *klms = mr->descs;
2078 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2079 u32 lkey = mr->ibmr.pd->local_dma_lkey;
2082 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
2083 mr->ibmr.length = 0;
2085 for_each_sg(sgl, sg, sg_nents, i) {
2086 if (unlikely(i >= mr->max_descs))
2088 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
2089 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
2090 klms[i].key = cpu_to_be32(lkey);
2091 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2097 *sg_offset_p = sg_offset;
2100 mr->data_length = mr->ibmr.length;
2102 if (meta_sg_nents) {
2104 sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
2105 for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
2106 if (unlikely(i + j >= mr->max_descs))
2108 klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
2110 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
2112 klms[i + j].key = cpu_to_be32(lkey);
2113 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2117 if (meta_sg_offset_p)
2118 *meta_sg_offset_p = sg_offset;
2120 mr->meta_ndescs = j;
2121 mr->meta_length = mr->ibmr.length - mr->data_length;
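	/*
	 * Each KLM entry carries its own {va, byte count, lkey} referencing
	 * the PD's local_dma_lkey, so SG elements of arbitrary length and
	 * alignment (including the metadata list) can be described without
	 * page-granular MTTs.
	 */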
2127 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
2129 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2132 if (unlikely(mr->ndescs == mr->max_descs))
2136 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2141 static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
2143 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2146 if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
2150 descs[mr->ndescs + mr->meta_ndescs++] =
2151 cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2157 mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2158 int data_sg_nents, unsigned int *data_sg_offset,
2159 struct scatterlist *meta_sg, int meta_sg_nents,
2160 unsigned int *meta_sg_offset)
2162 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2163 struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
2167 pi_mr->meta_ndescs = 0;
2168 pi_mr->meta_length = 0;
2170 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2171 pi_mr->desc_size * pi_mr->max_descs,
2174 pi_mr->ibmr.page_size = ibmr->page_size;
2175 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
2177 if (n != data_sg_nents)
2180 pi_mr->data_iova = pi_mr->ibmr.iova;
2181 pi_mr->data_length = pi_mr->ibmr.length;
2182 pi_mr->ibmr.length = pi_mr->data_length;
2183 ibmr->length = pi_mr->data_length;
2185 if (meta_sg_nents) {
2186 u64 page_mask = ~((u64)ibmr->page_size - 1);
2187 u64 iova = pi_mr->data_iova;
2189 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
2190 meta_sg_offset, mlx5_set_page_pi);
2192 pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * The PI address for the HW is the offset of the metadata
		 * address relative to the first data page address. It
		 * equals the first data page address + the size of the data
		 * pages + the metadata offset at the first metadata page.
		 */
2199 pi_mr->pi_iova = (iova & page_mask) +
2200 pi_mr->ndescs * ibmr->page_size +
2201 (pi_mr->ibmr.iova & ~page_mask);
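		/*
		 * Small worked example (assuming a 4KB page_size): data iova
		 * 0x10000 mapped with 3 data pages (ndescs == 3) and metadata
		 * starting 0x100 into its first page gives
		 * pi_iova = 0x10000 + 3 * 0x1000 + 0x100 = 0x13100.
		 */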
		/*
		 * In order to use one MTT MR for data and metadata, we also
		 * register the gaps between the end of the data and the
		 * start of the metadata (the sig MR will verify that the HW
		 * accesses the right addresses). This mapping is safe
		 * because we use an internal mkey for the registration.
		 */
2209 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
2210 pi_mr->ibmr.iova = iova;
2211 ibmr->length += pi_mr->meta_length;
2214 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2215 pi_mr->desc_size * pi_mr->max_descs,
2222 mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2223 int data_sg_nents, unsigned int *data_sg_offset,
2224 struct scatterlist *meta_sg, int meta_sg_nents,
2225 unsigned int *meta_sg_offset)
2227 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2228 struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2232 pi_mr->meta_ndescs = 0;
2233 pi_mr->meta_length = 0;
2235 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2236 pi_mr->desc_size * pi_mr->max_descs,
2239 n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
2240 meta_sg, meta_sg_nents, meta_sg_offset);
2242 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2243 pi_mr->desc_size * pi_mr->max_descs,
	/* This is a zero-based memory region */
2247 pi_mr->data_iova = 0;
2248 pi_mr->ibmr.iova = 0;
2249 pi_mr->pi_iova = pi_mr->data_length;
2250 ibmr->length = pi_mr->ibmr.length;
2255 int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2256 int data_sg_nents, unsigned int *data_sg_offset,
2257 struct scatterlist *meta_sg, int meta_sg_nents,
2258 unsigned int *meta_sg_offset)
2260 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2261 struct mlx5_ib_mr *pi_mr = NULL;
2264 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
2267 mr->data_length = 0;
2269 mr->meta_ndescs = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform a UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with the
	 * local_dma_lkey. Fall back to UMR only in case of a failure.
	 */
2277 n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2278 data_sg_offset, meta_sg, meta_sg_nents,
2280 if (n == data_sg_nents + meta_sg_nents)
	/*
	 * As a performance optimization, if possible, there is no need to
	 * map the sg lists to KLM descriptors. First try to map the sg
	 * lists to MTT descriptors and fall back to KLM only in case of a
	 * failure. It's more efficient for the HW to work with MTT
	 * descriptors (especially under high load).
	 * Use KLM (indirect access) only if it's mandatory.
	 */
2291 n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2292 data_sg_offset, meta_sg, meta_sg_nents,
2294 if (n == data_sg_nents + meta_sg_nents)
2298 n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2299 data_sg_offset, meta_sg, meta_sg_nents,
2301 if (unlikely(n != data_sg_nents + meta_sg_nents))
	/* This is a zero-based memory region */
2309 ibmr->sig_attrs->meta_length = pi_mr->meta_length;
2311 ibmr->sig_attrs->meta_length = mr->meta_length;
2316 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
2317 unsigned int *sg_offset)
2319 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2324 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2325 mr->desc_size * mr->max_descs,
2328 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
2329 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2332 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
2335 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2336 mr->desc_size * mr->max_descs,