// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns the size of the largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max call header size = %u\n",
		__func__, size);
	return size;
}
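
/* Sizing note for rpcrdma_max_call_header_size() (illustrative, not
 * normative): assuming rpcrdma_readchunk_maxsz is six XDR words (an
 * item discriminator, a position, and an HLOO tuple), maxsegs = 8
 * reserves 8 * 6 * 4 = 192 bytes for the Read list alone, on top of
 * the fixed header fields and the minimal one-segment Reply chunk.
 */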

/* Returns the size of the largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	ep->rep_max_inline_send =
		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->rep_max_inline_recv =
		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
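
/* Example (illustrative values): on a transport negotiated with a
 * 4 KB inline threshold, an NFS WRITE whose send buffer totals 8 KB
 * fails the xdr->len test above, so the marshaling code moves the
 * payload into a Read chunk rather than sending it inline. The real
 * limit is whatever rep_max_inline_send was computed to be.
 */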

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a Write list or a Reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep.rep_max_inline_recv;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
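
/* Example: a 6000-byte kvec that begins 3000 bytes into a 4 KB page
 * is split into three SGEs of 1096, 4096, and 808 bytes, since no
 * SGE produced here may cross a page boundary.
 */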

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}
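
/* Wire format note (RFC 8166): a plain segment is an HLOO tuple --
 * Handle32, Length32, Offset64 -- four XDR words in all. A read
 * segment prepends an item discriminator and a Position32, for six
 * words total, which is why the two reservations above are four and
 * six words respectively.
 */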

static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
						 struct rpcrdma_req *req,
						 struct rpcrdma_mr_seg *seg,
						 int nsegs, bool writing,
						 struct rpcrdma_mr **mr)
{
	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
	if (!*mr) {
		*mr = rpcrdma_mr_get(r_xprt);
		if (!*mr)
			goto out_getmr_err;
		trace_xprtrdma_mr_get(req);
		(*mr)->mr_req = req;
	}

	rpcrdma_mr_push(*mr, &req->rl_registered);
	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);

out_getmr_err:
	trace_xprtrdma_nomrs(req);
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	rpcrdma_mrs_refresh(r_xprt);
	return ERR_PTR(-EAGAIN);
}
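
/* Note: the -EAGAIN returned here is not fatal. It propagates up
 * through rpcrdma_marshal_req(), whose contract says the caller
 * should invoke it again with the same arguments; by then the MR
 * pool has (presumably) been replenished by rpcrdma_mrs_refresh().
 */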

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct rpc_rqst *rqst,
				    enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	if (rtype == rpcrdma_noch)
		goto done;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

done:
	return encode_item_not_present(xdr);
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct rpc_rqst *rqst,
				     enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech)
		goto done;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

done:
	return encode_item_not_present(xdr);
}
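
/* Encoded example (illustrative): a Write list carrying one chunk of
 * two segments appears on the wire as
 *
 *    1, 2, H1 L1 OO1, H2 L2 OO2, 0
 *
 * where the leading 1 marks the list item as present, 2 is the
 * segment count patched in via *segcount above, and the trailing 0
 * terminates the Write list.
 */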

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
				      struct rpcrdma_req *req,
				      struct rpc_rqst *rqst,
				      enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych)
		return encode_item_not_present(xdr);

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

static void rpcrdma_sendctx_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);
	struct rpcrdma_rep *rep = req->rl_reply;

	rpcrdma_complete_rqst(rep);
	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}

/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct ib_sge *sge;

	if (!sc->sc_unmap_count)
		return;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
				  DMA_TO_DEVICE);

	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}
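
/* Note on Send buffer lifetime: rl_kref is shared by the Send path
 * and the Reply path. Whichever side finishes last -- the Send
 * completion (via this function) or the incoming Reply (via
 * rpcrdma_reply_done) -- drops the final reference and invokes
 * rpcrdma_complete_rqst(), so the RPC cannot complete while the
 * device might still be reading the Send buffers.
 */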

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_sge *sge = sc->sc_sges;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sc->sc_device = rdmab_device(rb);
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr =
				ib_dma_map_page(rdmab_device(rb), *ppages,
						page_base, len, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdmab_device(rb),
						 sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = rdmab_lkey(rb);

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr =
			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
					DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = rdmab_lkey(rb);
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		kref_get(&req->rl_kref);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_sendctx_unmap(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_sendctx_unmap(sc);
	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
	return false;
}
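
/* Resulting SGE layout: sc_sges[0] always holds the transport header
 * (prepared by rpcrdma_prepare_hdr_sge), sc_sges[1] the head iovec,
 * and sc_sges[2..] any page list and/or tail content. Only entries
 * from index 2 onward are DMA-unmapped after the Send completes; the
 * first two stay mapped for reuse, matching rpcrdma_sendctx_unmap().
 */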

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto err;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	kref_init(&req->rl_kref);

	ret = -EIO;
	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
		goto err;
	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
			goto err;
	return 0;

err:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	frwr_reset(req);
	return ret;
}
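
/* Marshaling examples (illustrative): a small NFS GETATTR fits
 * inline, yielding an RDMA_MSG header whose three chunk lists are
 * all empty (xid, vers, credits, rdma_msg, 0, 0, 0, followed by the
 * RPC call itself). A large non-write call instead goes out as
 * RDMA_NOMSG, with the whole RPC message conveyed in a position-zero
 * Read chunk.
 */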

static void __rpcrdma_update_cwnd_locked(struct rpc_xprt *xprt,
					 struct rpcrdma_buffer *buf,
					 u32 grant)
{
	buf->rb_credits = grant;
	xprt->cwnd = grant << RPC_CWNDSHIFT;
}

static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
	spin_unlock(&xprt->transport_lock);
}

/**
 * rpcrdma_reset_cwnd - Reset the xprt's congestion window
 * @r_xprt: controlling transport instance
 *
 * Prepare @r_xprt for the next connection by reinitializing
 * its credit grant to one (see RFC 8166, Section 3.3.3).
 */
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	xprt->cong = 0;
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
	spin_unlock(&xprt->transport_lock);
}
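
/* Example: a server grant of 64 credits sets xprt->cwnd to
 * 64 << RPC_CWNDSHIFT, allowing up to 64 RPC Calls in flight. After
 * a reconnect, rpcrdma_reset_cwnd() shrinks this to a single credit
 * until the new connection advertises a fresh grant.
 */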

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
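
/* Example: for a reply with a 128-byte head, 1200 bytes of page-list
 * data, and a short tail, only the 1200 page-list bytes are
 * memcopied (and counted in the return value); the head and tail
 * iovecs are simply re-pointed into the receive buffer.
 */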

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}
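
/* Example: an RDMA_MSG reply carrying a 32768-byte Write chunk plus
 * 220 inline bytes returns 220 + xdr_align_size(32768), so the RPC
 * layer sees the full reply length, while total_rdma_reply counts
 * only the bytes moved by explicit RDMA.
 */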

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC:       %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC:       %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC:       %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

	/* If the incoming reply terminated a pending RPC, the next
	 * RPC call will post a replacement receive buffer as it is
	 * being marshaled.
	 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}

static void rpcrdma_reply_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);

	rpcrdma_complete_rqst(req->rl_reply);
}

/**
 * rpcrdma_reply_handler - Process received RPC/RDMA messages
 * @rep: Incoming rpcrdma_rep object to process
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Any data means we had a useful conversation, so
	 * then we don't need to delay the next reconnect.
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	if (buf->rb_credits != credits)
		rpcrdma_update_cwnd(r_xprt, credits);
	rpcrdma_post_recvs(r_xprt, false);

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	if (!list_empty(&req->rl_registered))
		frwr_unmap_async(r_xprt, req);
		/* LocalInv completion will complete the RPC */
	else
		kref_put(&req->rl_kref, rpcrdma_reply_done);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}