/*
 * Copyright (c) 2016 Oracle. All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include <rdma/rw.h>

#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	int			rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[0];
};
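
/* Rough sizing, assuming SG_CHUNK_SIZE is 128 and struct scatterlist
 * is 32 bytes on a 64-bit build: the inline rw_first_sgl[] array alone
 * is 128 * 32 = 4096 bytes, which is why these contexts are cached
 * and reused rather than allocated for every I/O.
 */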
static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);
	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(sizeof(*ctxt) +
			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
			       GFP_KERNEL);
		if (!ctxt)
			goto out;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl)) {
		kfree(ctxt);
		ctxt = NULL;
	}
out:
	return ctxt;
}
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, true);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}
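
/* Usage sketch for the context cache above: callers size a context by
 * the number of scatterlist entries they expect to need, and return it
 * once the corresponding rdma_rw_ctx has been destroyed. For example
 * (nr_sges is an illustrative name; see svc_rdma_build_writes() for
 * the real sizing heuristic):
 *
 *	ctxt = svc_rdma_get_rw_ctxt(rdma, nr_sges);
 *	if (!ctxt)
 *		return -ENOMEM;
 *	...
 *	svc_rdma_put_rw_ctxt(rdma, ctxt);
 */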
/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}
/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
	enum dma_data_direction cc_dir;
};
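
/* cc_sqecount accumulates the number of Send Queue entries the WR
 * chain will consume, so svc_rdma_post_chunk_ctxt() can reserve them
 * before posting. The Write and Reply chunk sender embeds one of
 * these in struct svc_rdma_write_info (wi_cc) below.
 */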
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc,
			     enum dma_data_direction dir)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
	cc->cc_dir = dir;
}
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, cc->cc_dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}

	svc_xprt_put(&rdma->sc_xprt);
}
/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};
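
/* wi_segs points into the Write or Reply chunk of the received
 * transport header: a counted array of segments, each four XDR words
 * long (an R_key, a byte count, and a 64-bit offset). The decoding in
 * svc_rdma_build_writes() below depends on this layout.
 */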
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc, DMA_TO_DEVICE);
	return info;
}
static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc);
	kfree(info);
}
/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_rdma_write_info_free(info);
}
/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr, *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		atomic_inc(&rdma_stat_sq_starve);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
	} while (1);

	pr_err("svcrdma: ib_post_send failed (%d)\n", ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}
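
/* Worked example of the Send Queue accounting above: if sc_sq_avail
 * is 4 and the chain needs 6 SQEs, atomic_sub_return() yields -2, so
 * the credits are returned, rdma_stat_sq_starve is bumped, and the
 * caller sleeps until completions push sc_sq_avail above 6.
 */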
/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}
/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = (info->wi_next_off + xdr->page_base) & ~PAGE_MASK;
	page_no = (info->wi_next_off + xdr->page_base) >> PAGE_SHIFT;
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		page++;
		sge_no++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}
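
/* Example of the offset arithmetic above, assuming 4KB pages: with
 * xdr->page_base of 100 and wi_next_off of 8000, the first SGE maps
 * xdr->pages[1] starting at byte 4004 ((8000 + 100) & ~PAGE_MASK), so
 * it covers at most 92 bytes before the loop moves to the next page.
 */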
/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	cc->cc_cqe.done = svc_rdma_write_done;
	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			goto out_noctx;

		constructor(info, write_len, ctxt);
		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				       ctxt->rw_nents, 0, seg_offset,
				       seg_handle, DMA_TO_DEVICE);
		if (ret < 0)
			goto out_initerr;

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_initerr:
	svc_rdma_put_rw_ctxt(rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}
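
/* Illustration of how svc_rdma_build_writes() consumes a chunk: for a
 * 10000-byte pagelist written into a chunk whose first segment is
 * 8192 bytes long, the first pass posts 8192 bytes and advances
 * wi_seg_no; the second pass posts the remaining 1808 bytes into the
 * next segment and leaves wi_seg_off at 1808.
 */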
/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}
/* Send an xdr_buf's page list by itself. A Write chunk is
 * just the page list. A Reply chunk is the head, page list,
 * and tail. This function is shared between the two types
 * of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr)
{
	info->wi_xdr = xdr;
	info->wi_next_off = 0;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     xdr->page_len);
}
/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!xdr->page_len)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return xdr->page_len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}
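
/* Call-site sketch (illustrative; wr_lst stands for the caller's
 * pointer to the client-provided Write list entry): after encoding an
 * RPC Reply, the send path pushes the payload pages through the Write
 * chunk before posting the Send carrying the transport header:
 *
 *	ret = svc_rdma_send_write_chunk(rdma, wr_lst, &rqstp->rq_res);
 *	if (ret < 0)
 *		goto err;
 */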
/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
			      bool writelist, struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rp_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!writelist && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}