// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery:
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

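/* A rough road map of the MR life cycle as implemented below (the
 * functions themselves are authoritative):
 *
 *   frwr_is_supported()      - probe the device for FRWR capability
 *   frwr_open()              - size the QP for REG_MR/LOCAL_INV traffic
 *   frwr_init_mr()           - allocate an ib_mr and its scatterlist
 *   frwr_map()               - build a REG_MR WR for an RPC's segments
 *   frwr_send()              - chain REG_MR WRs ahead of the RPC's Send WR
 *   frwr_reminv()            - note an MR the responder invalidated remotely
 *   frwr_unmap_sync()        - post LOCAL_INV WRs and wait for completion
 *   frwr_mr_recycle_worker() - tear down and replace a broken MR
 *   frwr_release_mr()        - destroy an MR
 */
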
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_is_supported - Check if device supports FRWR
 * @device: interface adapter to check
 *
 * Returns true if device supports FRWR, otherwise false
 */
bool frwr_is_supported(struct ib_device *device)
{
	struct ib_device_attr *attrs = &device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		device->name);
	return false;
}

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	frwr_release_mr(mr);
}

/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->frwr.fr_state = FRWR_IS_INVALID;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_open - Prepare an endpoint for use with FRWR
 * @ia: interface adapter this endpoint will use
 * @ep: endpoint to prepare
 *
 * On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	ep->rep_max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 *
 * On failure, a negative errno is returned.
 */
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
{
	struct ib_device_attr *attrs = &ia->ri_id->device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

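	/* An SG_GAPS-capable MR can register a scatterlist whose elements
	 * need not begin and end on page boundaries. frwr_map() checks
	 * ri_mrtype (as holes_ok) so that it only stops coalescing at
	 * page gaps on devices without this capability.
	 */
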
	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > 1)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
	dprintk("RPC: %s: max FR page list depth = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS (a worked example follows the loop below).
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}

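	/* For example, if RPCRDMA_MAX_DATA_SEGS were 64 and the device
	 * supported a depth of 16 pages per MR, a chunk list could need
	 * ceil(64 / 16) = 4 MRs, so the loop above runs three times and
	 * grows depth from 7 to 13: one extra reg + invalidate WR pair
	 * for each pagelist MR beyond the first.
	 */
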
	max_qp_wr = ia->ri_id->device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->rep_max_requests > max_qp_wr)
		ep->rep_max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		ep->rep_max_requests = max_qp_wr / depth;
		if (!ep->rep_max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_segs += 2;
	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
	return 0;
}

/**
 * frwr_maxpages - Compute size of largest payload
 * @r_xprt: transport
 *
 * Returns maximum size of an RPC message, in pages.
 *
 * FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

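	/* ri_max_segs was bumped by two in frwr_open() to cover a Reply
	 * chunk's head and tail buffers, so those two segments are
	 * subtracted back out of the payload calculation here.
	 */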
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
			container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		frwr->fr_state = FRWR_FLUSHED_FR;
	trace_xprtrdma_wc_fastreg(wc, frwr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		frwr->fr_state = FRWR_FLUSHED_LI;
	trace_xprtrdma_wc_li(wc, frwr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		frwr->fr_state = FRWR_FLUSHED_LI;
	trace_xprtrdma_wc_li_wake(wc, frwr);
	complete(&frwr->fr_linv_done);
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @out: initialized MR
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, the prepared MR is planted in @out.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;

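	/* Pull an MR off the transport's free list. An MR that has not
	 * returned to the INVALID state (for example one left VALID or
	 * FLUSHED by a transport disconnect) is handed to the recycler
	 * and another is tried; see "Transport recovery" above.
	 */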
	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_recycle(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-EAGAIN);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
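		/* Without SG_GAPS support, coalescing must stop at the
		 * first interior page gap: the upcoming segment has to
		 * start on a page boundary, and the segment just added
		 * has to end on one.
		 */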
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents =
		ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

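	/* Stamping the RPC's XID into the upper 32 bits of the MR's iova
	 * makes it easy to match a registration to its RPC in traces and
	 * on the wire. Bumping the rkey's low-order "key" byte on every
	 * registration helps catch use of a stale rkey left over from an
	 * earlier registration of this MR.
	 */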
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	*out = mr;
	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	rpcrdma_mr_recycle(mr);
	return ERR_PTR(-EIO);
}

/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: Prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the result of ib_post_send.
 */
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
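	/* Each registered MR contributes one REG_MR WR. Every new WR is
	 * linked ahead of the previous post_wr, so the chain that gets
	 * posted runs REG_MR, ..., REG_MR, Send: all registrations
	 * execute before the Send that depends on them.
	 */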
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

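	/* rep->rr_inv_rkey carries the rkey that the responder has already
	 * invalidated (Send With Invalidate), so the matching MR does not
	 * need a LOCAL_INV WR of its own.
	 */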
	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport
 * @mrs: list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	count = 0;
	prev = &first;
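	/* Each MR on @mrs becomes one unsignaled LOCAL_INV WR; prev links
	 * the WRs into a single chain headed by first.
	 */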
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_mr_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
	if (!frwr)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
unmap:
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

out_release:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Unmap and release the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		rpcrdma_mr_recycle(mr);
	}
}