/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};
static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
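/*
 * In mainline this file is built into ib_core, so the debug parameter above
 * can be toggled at boot with "ib_core.force_mr=1" on the kernel command
 * line (an illustrative invocation; the module name depends on the build).
 */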
/*
 * Check if the device might use memory registration.  This is currently only
 * true for iWarp devices.  In the future we can hopefully fine tune this
 * based on HCA driver input.
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}
/*
 * Check if the device will use memory registration for this RW operation.
 * We currently always use memory registrations for iWarp RDMA READs, and
 * have a debug option to force usage of MRs.
 *
 * XXX: In the future we can hopefully fine tune this based on HCA driver
 * input.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}
static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}
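/*
 * For a sense of scale (illustrative, assuming 4 KiB pages): the cap of 256
 * entries above limits a single fast-registration MR to 256 * 4 KiB = 1 MiB
 * of payload, so larger transfers are split across multiple MRs by the
 * callers below.
 */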
/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < 0 || ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}
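/*
 * Sketch of the per-MR chain this helper contributes to (the RDMA WR itself
 * is linked in by rdma_rw_init_mr_wrs() below):
 *
 *	[IB_WR_LOCAL_INV] -> IB_WR_REG_MR -> IB_WR_RDMA_READ/WRITE
 *
 * The LOCAL_INV is only queued when the pooled MR still carries a previous
 * registration (need_inval is set).
 */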
static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct rdma_rw_reg_ctx *prev = NULL;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
			count++;
		}

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		prev = reg;
		offset = 0;
	}

	if (prev)
		prev->wr.wr.next = NULL;

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}
static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
		      qp->max_read_sge;
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			sge->addr = sg_dma_address(sg) + offset;
			sge->length = sg_dma_len(sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
			&ctx->map.wrs[i + 1].wr : NULL;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}
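/*
 * Worked example for the multi-WR path (illustrative numbers only): with
 * qp->max_write_sge == 30 and a 100-entry mapped scatterlist,
 * DIV_ROUND_UP(100, 30) == 4, so four chained RDMA WRITE WRs are built,
 * carrying 30 + 30 + 30 + 10 SGEs respectively.
 */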
static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = sg_dma_address(sg) + offset;
	ctx->single.sge.length = sg_dma_len(sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}
/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @sg_offset: current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	if (is_pci_p2pdma_page(sg_page(sg)))
		ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
	else
		ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);

	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
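/*
 * Typical calling sequence (a minimal sketch, not code from this file; qp,
 * sgl, sg_cnt, done_cqe etc. stand in for the caller's own state):
 *
 *	struct rdma_rw_ctx ctx;
 *	int ret;
 *
 *	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0,
 *			remote_addr, rkey, DMA_TO_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *	ret = rdma_rw_ctx_post(&ctx, qp, port_num, &done_cqe, NULL);
 *
 * and, once the completion for done_cqe has arrived:
 *
 *	rdma_rw_ctx_destroy(&ctx, qp, port_num, sgl, sg_cnt, DMA_TO_DEVICE);
 */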
/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx: context to initialize
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist to READ/WRITE from/to
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs: signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey: remote key to operate on
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	struct ib_rdma_wr *rdma_wr;
	struct ib_send_wr *prev_wr = NULL;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large\n");
		return -EINVAL;
	}

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
	if (!ret) {
		ret = -ENOMEM;
		goto out_unmap_sg;
	}
	prot_sg_cnt = ret;

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
	if (!ctx->sig) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
	if (ret < 0)
		goto out_free_ctx;
	count += ret;
	prev_wr = &ctx->sig->data.reg_wr.wr;

	if (prot_sg_cnt) {
		ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
				prot_sg, prot_sg_cnt, 0);
		if (ret < 0)
			goto out_destroy_data_mr;
		count += ret;

		if (ctx->sig->prot.inv_wr.next)
			prev_wr->next = &ctx->sig->prot.inv_wr;
		else
			prev_wr->next = &ctx->sig->prot.reg_wr.wr;
		prev_wr = &ctx->sig->prot.reg_wr.wr;
	}

	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->sig->sig_mr) {
		ret = -EAGAIN;
		goto out_destroy_prot_mr;
	}

	if (ctx->sig->sig_mr->need_inval) {
		memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));

		ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
		ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;

		prev_wr->next = &ctx->sig->sig_inv_wr;
		prev_wr = &ctx->sig->sig_inv_wr;
	}

	ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	ctx->sig->sig_wr.wr.wr_cqe = NULL;
	ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
	ctx->sig->sig_wr.wr.num_sge = 1;
	ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	ctx->sig->sig_wr.sig_attrs = sig_attrs;
	ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
	if (prot_sg_cnt)
		ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
	prev_wr->next = &ctx->sig->sig_wr.wr;
	prev_wr = &ctx->sig->sig_wr.wr;
	count++;

	ctx->sig->sig_sge.addr = 0;
	ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
	if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
		ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;

	rdma_wr = &ctx->sig->data.wr;
	rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	prev_wr->next = &rdma_wr->wr;
	prev_wr = &rdma_wr->wr;
	count++;

	return count;

out_destroy_prot_mr:
	if (prot_sg_cnt)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
out_destroy_data_mr:
	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
out_free_ctx:
	kfree(ctx->sig);
out_unmap_prot_sg:
	ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
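/*
 * Rough shape of the chain assembled above (optional WRs in brackets):
 *
 *	[inv data] -> reg data -> [inv prot] -> reg prot ->
 *	[inv sig] -> reg sig mr -> rdma read/write
 */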
/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs.  If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}
/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed.  If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
		rdma_rw_update_lkey(&ctx->sig->data, true);
		if (ctx->sig->prot.mr)
			rdma_rw_update_lkey(&ctx->sig->prot, true);

		ctx->sig->sig_mr->need_inval = true;
		ib_update_fast_reg_key(ctx->sig->sig_mr,
			ib_inc_rkey(ctx->sig->sig_mr->lkey));
		ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;

		if (ctx->sig->data.inv_wr.next)
			first_wr = &ctx->sig->data.inv_wr;
		else
			first_wr = &ctx->sig->data.reg_wr.wr;
		last_wr = &ctx->sig->data.wr.wr;
		break;
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
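/*
 * Example of @chain_wr (an illustrative sketch; rsp_wr is a placeholder for
 * the caller's own response send WR): a target can chain the send of its
 * response to the RDMA READ/WRITE chain so everything is posted in one call:
 *
 *	struct ib_send_wr *first;
 *
 *	first = rdma_rw_ctx_wrs(&ctx, qp, port_num, NULL, &rsp_wr);
 *	ret = ib_post_send(qp, first, NULL);
 */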
/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx: context to operate on
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @cqe: completion queue entry for the last WR
 * @chain_wr: WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed.  If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);
/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	/* P2PDMA contexts do not need to be unmapped */
	if (!is_pci_p2pdma_page(sg_page(sg)))
		ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx: context to release
 * @qp: queue pair to operate on
 * @port_num: port num to which the connection is bound
 * @sg: scatterlist that was used for the READ/WRITE
 * @sg_cnt: number of entries in @sg
 * @prot_sg: scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);

	if (ctx->sig->prot.mr) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
	}

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
	kfree(ctx->sig);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device: device handling the connection
 * @port_num: port num to which the connection is bound
 * @maxpages: maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move @maxpages
 * worth of payload.  The returned value is used during transport
 * creation to compute max_rdma_ctxs and the size of the transport's
 * Send and Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
		unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
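/*
 * Worked example (illustrative numbers only): with the 256-page cap from
 * rdma_rw_fr_page_list_len() in effect, a 1024-page maximum payload gives
 * DIV_ROUND_UP(1024, 256) == 4, i.e. up to four MRs per rdma_rw_ctx.
 */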
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional MRs for the registrations and the
	 * invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		factor += 6;	/* (inv + reg) * (data + prot + sig) */
	else if (rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get..
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
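/*
 * For example (illustrative numbers only): an iWarp QP created with
 * cap.max_rdma_ctxs == 128 gets factor == 1 + 2 == 3, i.e. 384 additional
 * send WRs on top of whatever the caller asked for, subject to the
 * dev->attrs.max_qp_wr clamp above.
 */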
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs * 2;
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				rdma_rw_fr_page_list_len(dev));
		if (ret) {
			pr_err("%s: failed to allocate %d MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_SIGNATURE, 2);
		if (ret) {
			pr_err("%s: failed to allocate %d SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}