 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR), sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request.
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
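 *
 * (The occasional signaling mentioned above is driven by a simple
 * completion-count budget: frwr_op_map below calls DECR_CQCOUNT() when
 * it posts an unsignaled WR, and frwr_op_unmap_sync calls
 * INIT_CQCOUNT() before posting a signaled one. A minimal sketch of
 * that pattern, using hypothetical field names rather than the real
 * rpcrdma_ep layout, would be:
 *
 *      if (--ep->signal_countdown <= 0) {
 *              wr->send_flags |= IB_SEND_SIGNALED;
 *              ep->signal_countdown = ep->signal_budget;
 *      }
 *
 * Unsignaled WRs still occupy send queue slots until a later signaled
 * WR on the same queue completes.)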
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is marked INVALID but whose LOCAL_INV WR has not yet completed.
 * Work Queue ordering prevents a subsequent FAST_REG WR from executing
 * against that MR while it is still being invalidated.
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * in progress.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * the following states:
 *
 * INVALID: The MR was not in use before the QP entered ERROR state.
 *
 * VALID: The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR: The MR was being registered when the QP entered ERROR
 *             state, and the pending WR was flushed.
 *
 * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
 *             state, and the pending WR was flushed.
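 *
 * These labels correspond to the rpcrdma_frmr_state values used by the
 * code below. As a sketch (the enum itself is declared in xprt_rdma.h
 * and is assumed here rather than shown), it looks roughly like:
 *
 *      enum rpcrdma_frmr_state {
 *              FRMR_IS_INVALID,
 *              FRMR_IS_VALID,
 *              FRMR_FLUSHED_FR,
 *              FRMR_FLUSHED_LI,
 *      };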
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
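 *
 * (rpcrdma_defer_mr_recovery(), called from the error paths below, is
 * what hands a broken MW to that workqueue; the recovery worker then
 * invokes ->ro_recover_mr, which is frwr_op_recover_mr() in this file.
 * The workqueue itself lives outside this file and is only assumed
 * here.)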
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
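
/* A minimal sketch of that drain step, assuming the connect worker
 * (which lives outside this file) still holds a valid QP through
 * ia->ri_id; this is an illustration, not a copy of that worker:
 *
 *      if (ia->ri_id && ia->ri_id->qp)
 *              ib_drain_sq(ia->ri_id->qp);
 *
 * ib_drain_sq() returns only after all previously posted send queue
 * WRs have completed or been flushed.
 */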

#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

frwr_is_supported(struct rpcrdma_ia *ia)
{
        struct ib_device_attr *attrs = &ia->ri_device->attrs;

        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                goto out_not_supported;
        if (attrs->max_fast_reg_page_list_len == 0)
                goto out_not_supported;

out_not_supported:
        pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",

frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
        unsigned int depth = ia->ri_max_frmr_depth;
        struct rpcrdma_frmr *f = &r->frmr;

        f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG, depth);
        if (IS_ERR(f->fr_mr))

        r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);

        sg_init_table(r->mw_sg, depth);
        init_completion(&f->fr_linv_done);

        rc = PTR_ERR(f->fr_mr);
        dprintk("RPC: %s: ib_alloc_mr status %i\n",

        dprintk("RPC: %s: sg allocation failure\n",

        ib_dereg_mr(f->fr_mr);

frwr_op_release_mr(struct rpcrdma_mw *r)
{
        /* Ensure MW is not on any rl_registered list */
        if (!list_empty(&r->mw_list))
                list_del(&r->mw_list);

        rc = ib_dereg_mr(r->frmr.fr_mr);
        pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",

__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
        struct rpcrdma_frmr *f = &r->frmr;

        rc = ib_dereg_mr(f->fr_mr);
        pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",

        f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
                               ia->ri_max_frmr_depth);
        if (IS_ERR(f->fr_mr)) {
                pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
                        PTR_ERR(f->fr_mr), r);
                return PTR_ERR(f->fr_mr);
        }

        dprintk("RPC: %s: recovered FRMR %p\n", __func__, f);
        f->fr_state = FRMR_IS_INVALID;

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
        enum rpcrdma_frmr_state state = mw->frmr.fr_state;
        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        rc = __frwr_reset_mr(ia, mw);
        if (state != FRMR_FLUSHED_LI)
                ib_dma_unmap_sg(ia->ri_device,
                                mw->mw_sg, mw->mw_nents, mw->mw_dir);

        rpcrdma_put_mw(r_xprt, mw);
        r_xprt->rx_stats.mrs_recovered++;

        pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
        r_xprt->rx_stats.mrs_orphaned++;

        spin_lock(&r_xprt->rx_buf.rb_mwlock);
        list_del(&mw->mw_all);
        spin_unlock(&r_xprt->rx_buf.rb_mwlock);

        frwr_op_release_mr(mw);

frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
             struct rpcrdma_create_data_internal *cdata)
{
        ia->ri_max_frmr_depth =
                        min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                              ia->ri_device->attrs.max_fast_reg_page_list_len);
        dprintk("RPC: %s: device's max FR page list len = %u\n",
                __func__, ia->ri_max_frmr_depth);

        /* Add room for frmr register and invalidate WRs.
         * 1. FRMR reg WR for head
         * 2. FRMR invalidate WR for head
         * 3. N FRMR reg WRs for pagelist
         * 4. N FRMR invalidate WRs for pagelist
         * 5. FRMR reg WR for tail
         * 6. FRMR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */

        /* Calculate N if the device max FRMR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
                        depth += 2; /* FRMR reg + invalidate */
                        delta -= ia->ri_max_frmr_depth;
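
        /* Worked example (hypothetical numbers): with the seven WRs
         * itemized above as the starting depth, a device limited to
         * ri_max_frmr_depth = 30, and RPCRDMA_MAX_DATA_SEGS = 64,
         * delta starts at 34. The two statements above repeat (a
         * do/while in the full source) until delta is exhausted:
         * pass 1 leaves depth = 9, delta = 4; pass 2 leaves depth = 11,
         * delta <= 0. Each RPC therefore needs up to 11 send queue
         * slots, and max_send_wr is scaled by that factor below.
         */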
        ep->rep_attr.cap.max_send_wr *= depth;
        if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
                cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
                if (!cdata->max_requests)
                ep->rep_attr.cap.max_send_wr = cdata->max_requests *

        ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
                                ia->ri_max_frmr_depth);

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
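
        /* Worked example (hypothetical numbers): with
         * RPCRDMA_MAX_HDR_SEGS = 8 and ri_max_frmr_depth = 4, the
         * product is 32, so (assuming RPCRDMA_MAX_DATA_SEGS is larger)
         * the result below is 32 pages per chunk; a much deeper device
         * limit would instead be capped at RPCRDMA_MAX_DATA_SEGS.
         */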
        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);

__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
                       wr, ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq: completion queue (ignored)
 */
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
                frmr->fr_state = FRMR_FLUSHED_FR;
                __frwr_sendcompletion_flush(wc, "fastreg");
        }

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 */
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
                frmr->fr_state = FRMR_FLUSHED_LI;
                __frwr_sendcompletion_flush(wc, "localinv");
        }

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
        struct rpcrdma_frmr *frmr;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
        if (wc->status != IB_WC_SUCCESS) {
                frmr->fr_state = FRMR_FLUSHED_LI;
                __frwr_sendcompletion_flush(wc, "localinv");
        }
        complete(&frmr->fr_linv_done);

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing, struct rpcrdma_mw **out)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mw *mw;
        struct rpcrdma_frmr *frmr;
        struct ib_reg_wr *reg_wr;
        struct ib_send_wr *bad_wr;
        int rc, i, n, dma_nents;

                rpcrdma_defer_mr_recovery(mw);
                mw = rpcrdma_get_mw(r_xprt);
        } while (mw->frmr.fr_state != FRMR_IS_INVALID);

        frmr->fr_state = FRMR_IS_VALID;

        reg_wr = &frmr->fr_regwr;

        if (nsegs > ia->ri_max_frmr_depth)
                nsegs = ia->ri_max_frmr_depth;
        for (i = 0; i < nsegs;) {
                        sg_set_page(&mw->mw_sg[i],
                                    offset_in_page(seg->mr_offset));

                        sg_set_buf(&mw->mw_sg[i], seg->mr_offset,

                /* Check for holes */
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))

        mw->mw_dir = rpcrdma_data_dir(writing);

        dma_nents = ib_dma_map_sg(ia->ri_device,
                                  mw->mw_sg, mw->mw_nents, mw->mw_dir);

        n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
        if (unlikely(n != mw->mw_nents))

        dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
                __func__, frmr, mw->mw_nents, mr->length);

        key = (u8)(mr->rkey & 0x000000FF);
        ib_update_fast_reg_key(mr, ++key);

        reg_wr->wr.next = NULL;
        reg_wr->wr.opcode = IB_WR_REG_MR;
        frmr->fr_cqe.done = frwr_wc_fastreg;
        reg_wr->wr.wr_cqe = &frmr->fr_cqe;
        reg_wr->wr.num_sge = 0;
        reg_wr->wr.send_flags = 0;

        reg_wr->key = mr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        DECR_CQCOUNT(&r_xprt->rx_ep);
        rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);

        mw->mw_handle = mr->rkey;
        mw->mw_length = mr->length;
        mw->mw_offset = mr->iova;

        pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
               mw->mw_sg, mw->mw_nents);
        rpcrdma_defer_mr_recovery(mw);

        pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
               frmr->fr_mr, n, mw->mw_nents);
        rpcrdma_defer_mr_recovery(mw);

        pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
        rpcrdma_defer_mr_recovery(mw);

static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mw *mw)
{
        struct rpcrdma_frmr *f = &mw->frmr;
        struct ib_send_wr *invalidate_wr;

        dprintk("RPC: %s: invalidating frmr %p\n", __func__, f);

        f->fr_state = FRMR_IS_INVALID;
        invalidate_wr = &f->fr_invwr;

        memset(invalidate_wr, 0, sizeof(*invalidate_wr));
        f->fr_cqe.done = frwr_wc_localinv;
        invalidate_wr->wr_cqe = &f->fr_cqe;
        invalidate_wr->opcode = IB_WR_LOCAL_INV;
        invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

        return invalidate_wr;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that req->rl_registered is not empty.
 */
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
        struct rpcrdma_rep *rep = req->rl_reply;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mw *mw, *tmp;
        struct rpcrdma_frmr *f;

        dprintk("RPC: %s: req %p\n", __func__, req);

        /* ORDER: Invalidate all of the req's MRs first
         *
         * Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        invalidate_wrs = pos = prev = NULL;
        list_for_each_entry(mw, &req->rl_registered, mw_list) {
                if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) &&
                    (mw->mw_handle == rep->rr_inv_rkey)) {
                        mw->frmr.fr_state = FRMR_IS_INVALID;

                pos = __frwr_prepare_linv_wr(mw);

                        invalidate_wrs = pos;

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete.
         */
        f->fr_invwr.send_flags = IB_SEND_SIGNALED;
        f->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&f->fr_linv_done);
        INIT_CQCOUNT(&r_xprt->rx_ep);

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless ri_id->qp is a valid pointer.
         */
        r_xprt->rx_stats.local_inv_needed++;
        rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);

        wait_for_completion(&f->fr_linv_done);

        /* ORDER: Now DMA unmap all of the req's MRs, and return
         * them to the free MW list.
         */
        list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
                dprintk("RPC: %s: unmapping frmr %p\n",
                        __func__, &mw->frmr);
                list_del_init(&mw->mw_list);
                ib_dma_unmap_sg(ia->ri_device,
                                mw->mw_sg, mw->mw_nents, mw->mw_dir);
                rpcrdma_put_mw(r_xprt, mw);
        }

        pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
        rdma_disconnect(ia->ri_id);

        /* Find and reset the MRs in the LOCAL_INV WRs that did not
         * get posted. This is synchronous, and slow.
         */
        list_for_each_entry(mw, &req->rl_registered, mw_list) {
                if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
                        __frwr_reset_mr(ia, mw);
                        bad_wr = bad_wr->next;

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
        struct rpcrdma_mw *mw;

        while (!list_empty(&req->rl_registered)) {
                mw = list_first_entry(&req->rl_registered,
                                      struct rpcrdma_mw, mw_list);
                list_del_init(&mw->mw_list);

                        frwr_op_recover_mr(mw);

                        rpcrdma_defer_mr_recovery(mw);

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_map = frwr_op_map,
        .ro_unmap_sync = frwr_op_unmap_sync,
        .ro_unmap_safe = frwr_op_unmap_safe,
        .ro_recover_mr = frwr_op_recover_mr,
        .ro_open = frwr_op_open,
        .ro_maxpages = frwr_op_maxpages,
        .ro_init_mr = frwr_op_init_mr,
        .ro_release_mr = frwr_op_release_mr,
        .ro_displayname = "frwr",
        .ro_send_w_inv_ok = RPCRDMA_CMP_F_SND_W_INV_OK,
};