// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */
#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}
/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}
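
/* Sizing note added for illustration (not part of the original source):
 * the fixed portion of an RPC-over-RDMA v1 header is seven XDR words
 * (rdma_xid, rdma_vers, rdma_credit, rdma_proc, plus one discriminator
 * word for each of the three chunk lists), which is RPCRDMA_HDRLEN_MIN.
 * A plain segment is four words (handle, length, 64-bit offset), and a
 * Read list entry adds an item-present word and a position word in
 * front of its segment. With a typical eight-segment maximum, the
 * worst-case Call header is therefore a few hundred bytes, which is why
 * it is computed once and subtracted from the inline threshold instead
 * of being recomputed for every RPC.
 */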
/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	ep->rep_max_inline_send =
		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->rep_max_inline_recv =
		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}
/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
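
/* Illustrative example (not part of the original source): a Call whose
 * rq_snd_buf carries three full pages of payload needs
 * RPCRDMA_MIN_SEND_SGES base SGEs plus one SGE per payload page. If
 * that count exceeds the device's ri_max_send_sges, the payload is
 * moved into a Read chunk even when the message would fit under the
 * inline byte limit.
 */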
/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}
/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep.rep_max_inline_recv;
}
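
/* Example added for clarity (not part of the original source): for a
 * large NFS READ, the payload is returned via a Write chunk, so only
 * the reply's head and tail (status, attributes, EOF flag, XDR pad)
 * need to be received inline. Only when even that non-payload portion
 * could exceed the inline threshold must the client set up a full
 * Reply chunk instead.
 */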
/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}
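
/* Worked example (illustration only, not from the original source): an
 * xdr_buf with a 140-byte head, 8192 bytes of page data starting at a
 * 256-byte offset into its first page, and a 4-byte tail converts into
 * one segment for the head kvec (assuming it does not cross a page
 * boundary), three segments of 3840, 4096, and 256 bytes for the page
 * list, and, unless implicit roundup lets it be omitted, one more
 * segment for the tail -- five mr_segs in total.
 */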
static int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}
static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}
static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;	/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}
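
/* Wire-format note added for clarity (see RFC 8166; not part of the
 * original source): a plain segment is four XDR words -- handle,
 * length, and a 64-bit offset -- which is why encode_rdma_segment()
 * reserves four words. A Read list entry prefixes that with an
 * item-present discriminator and a position word, giving the six words
 * reserved by encode_read_segment().
 */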
static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
						 struct rpcrdma_req *req,
						 struct rpcrdma_mr_seg *seg,
						 int nsegs, bool writing,
						 struct rpcrdma_mr **mr)
{
	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
	if (!*mr) {
		*mr = rpcrdma_mr_get(r_xprt);
		if (!*mr)
			goto out_getmr_err;
		trace_xprtrdma_mr_get(req);
		(*mr)->mr_req = req;
	}

	rpcrdma_mr_push(*mr, &req->rl_registered);
	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);

out_getmr_err:
	trace_xprtrdma_nomrs(req);
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_work(&r_xprt->rx_buf.rb_refresh_worker);
	return ERR_PTR(-EAGAIN);
}
/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct rpc_rqst *rqst,
				    enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	if (rtype == rpcrdma_noch)
		goto done;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

done:
	return encode_item_not_present(xdr);
}
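
/* Example encoding (illustrative, not from the original source): a
 * Position Zero Read chunk registered as two segments is emitted as
 *
 *    1 - P=0,H1,L1,OO1 - 1 - P=0,H2,L2,OO2 - 0
 *
 * two item-present entries sharing the same position, terminated by
 * the Read list's final discriminator word.
 */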
/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct rpc_rqst *rqst,
				     enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech)
		goto done;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

done:
	return encode_item_not_present(xdr);
}
/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
				      struct rpcrdma_req *req,
				      struct rpc_rqst *rqst,
				      enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych)
		return encode_item_not_present(xdr);

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}
static void rpcrdma_sendctx_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);
	struct rpcrdma_rep *rep = req->rl_reply;

	rpcrdma_complete_rqst(rep);
	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}
/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct ib_sge *sge;

	if (!sc->sc_unmap_count)
		return;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
				  DMA_TO_DEVICE);

	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}
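
/* Note added for clarity (not part of the original source): sc_sges[0]
 * holds the persistently mapped transport header (rl_rdmabuf) and
 * sc_sges[1] the persistently mapped Send buffer head (rl_sendbuf).
 * Only SGEs from index 2 onward were DMA-mapped for this particular
 * Send, so only those are unmapped above.
 */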
/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}
/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_sge *sge = sc->sc_sges;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sc->sc_device = rdmab_device(rb);
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}
	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr =
				ib_dma_map_page(rdmab_device(rb), *ppages,
						page_base, len, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdmab_device(rb),
						 sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = rdmab_lkey(rb);

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr =
			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
					DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = rdmab_lkey(rb);
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		kref_get(&req->rl_kref);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_sendctx_unmap(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_sendctx_unmap(sc);
	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
	return false;
}
/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto err;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	kref_init(&req->rl_kref);

	ret = -EIO;
	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
		goto err;
	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
			goto err;
	return 0;

err:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}
/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;
	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}
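
	/* Worked example (illustration only, not from the original
	 * source): a 64KB NFS WRITE on a transport with a 4KB inline
	 * threshold exceeds rep_max_inline_send and its rq_snd_buf is
	 * flagged XDRBUF_WRITE, so rtype becomes rpcrdma_readch and the
	 * header goes out as RDMA_MSG; the small WRITE reply fits
	 * inline, so wtype stays rpcrdma_noch.
	 */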
	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	return ret;
}
/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						curlen, page_base);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
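
/* Worked example (illustration only, not from the original source): a
 * 220-byte inline reply arrives for an rqst whose rq_rcv_buf has a
 * 128-byte head and a one-page page list. The head iovec is simply
 * pointed at the start of the receive buffer (no copy), the remaining
 * 92 bytes are memcopied into the first receive page, and the function
 * returns 92.
 */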
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}
static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}
/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}
/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}
static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}
static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}
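
/* Note added for clarity (not part of the original source): an
 * RDMA_MSG reply may carry a Write list but never a Reply chunk, hence
 * the sanity check above. The value returned is the inline RPC message
 * length plus the XDR-aligned size of the data the server placed in
 * the Write chunk; the low-order bits of that Write list length are
 * passed to rpcrdma_inline_fixup() as the pad so that inline content
 * following the chunk stays XDR-aligned.
 */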
static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}
static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC: %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC: %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EIO;
}
/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

	/* If the incoming reply terminated a pending RPC, the next
	 * RPC call will post a replacement receive buffer as it is
	 * being marshaled.
	 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}
static void rpcrdma_reply_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);

	rpcrdma_complete_rqst(req->rl_reply);
}
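
/* Note added for clarity (not part of the original source): rl_kref
 * starts at one reference, released on the Reply side once the reply
 * (and any Local Invalidation it requires) has been handled;
 * rpcrdma_prepare_msg_sges() takes a second reference when Send SGEs
 * were DMA-mapped, released by rpcrdma_sendctx_unmap(). Whichever
 * release is last invokes rpcrdma_complete_rqst(), so the RPC does not
 * complete until both the Send and the Reply are finished with the
 * buffers.
 */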
/**
 * rpcrdma_reply_handler - Process received RPC/RDMA messages
 * @rep: Incoming rpcrdma_rep object to process
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Any data means we had a useful conversation, so
	 * then we don't need to delay the next reconnect.
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;
	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	if (buf->rb_credits != credits) {
		spin_lock(&xprt->transport_lock);
		buf->rb_credits = credits;
		xprt->cwnd = credits << RPC_CWNDSHIFT;
		spin_unlock(&xprt->transport_lock);
	}

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	if (!list_empty(&req->rl_registered))
		frwr_unmap_async(r_xprt, req);
		/* LocalInv completion will complete the RPC */
	else
		kref_put(&req->rl_kref, rpcrdma_reply_done);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}