// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */
#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}
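/* A worked example, with illustrative figures that are not part of
 * the original source: RPCRDMA_HDRLEN_MIN is 28 bytes (seven XDR
 * words: xid, vers, credits, proc, plus three empty chunk-list
 * discriminators). Each Read list entry costs rpcrdma_readchunk_maxsz
 * (6) words: presence discriminator, position, handle, length, and a
 * 64-bit offset. With maxsegs = 8, the worst-case Call header is
 * therefore 28 + (8 * 24) + 24 = 244 bytes.
 */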
/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}
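/* By the same illustrative arithmetic as above: with maxsegs = 8, a
 * full-size Write list costs 4 + (8 * 16) + 4 = 136 bytes, so the
 * worst-case Reply header is 28 + 136 = 164 bytes.
 */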
/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	ep->rep_max_inline_send =
		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->rep_max_inline_recv =
		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}
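/* Continuing the illustrative example above: assuming a negotiated
 * inline threshold of 4096 bytes and maxsegs = 8, rep_max_inline_send
 * would be 4096 - 244 = 3852 bytes and rep_max_inline_recv would be
 * 4096 - 164 = 3932 bytes. RPC messages that fit within these budgets
 * need no explicit RDMA data transfers at all.
 */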
/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
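/* For example (hypothetical values): an 8192-byte, page-aligned page
 * list consumes two SGEs in the loop above, on top of the baseline
 * RPCRDMA_MIN_SEND_SGES that accounts for the SGEs a Send always
 * carries. If the device's ri_max_send_sges cannot cover that total,
 * the payload is moved to a Read chunk even though it fits under the
 * inline byte limit.
 */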
/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a Write list or a Reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}
/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep.rep_max_inline_recv;
}
/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
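/* A worked example (hypothetical values): for a kvec with
 * iov_len = 9000 whose base sits 100 bytes into a 4096-byte page,
 * the loop above emits three SGEs of 3996, 4096, and 908 bytes, and
 * bumps *n by three. Only the first SGE carries a non-zero page
 * offset.
 */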
/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}
static int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}
static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}
static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}
static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;	/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}
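/* On the wire, each Read segment occupies the six XDR words reserved
 * above:
 *
 *	+---+----------+--------+--------+-----------------+
 *	| 1 | position | handle | length | offset (64-bit) |
 *	+---+----------+--------+--------+-----------------+
 *
 * The leading 1 is the list discriminator marking another item
 * present; position is the byte offset in the XDR stream where the
 * chunked data logically belongs.
 */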
/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;
}
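/* For instance, if frwr_map() splits the payload across two MRs, the
 * stream now holds 1-PH1L1O1-1-PH2L2O2, both segments carrying the
 * same position P. The terminating 0 for the Read list is not added
 * here; rpcrdma_marshal_req() encodes it via encode_item_not_present()
 * once all Read segments are in place.
 */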
/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}
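/* The segment count cannot be known until frwr_map() has carved the
 * buffer into MRs, so the count's XDR slot is reserved up front and
 * backfilled through *segcount after the loop. A two-MR Write chunk
 * thus ends up as 1 - 2 - H1L1O1 - H2L2O2 in the stream; as with the
 * Read list, the trailing 0 that closes the Write list is encoded by
 * the caller.
 */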
/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}
/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct ib_sge *sge;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
				  DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES,
			       &sc->sc_req->rl_flags))
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
}
/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}
/* Prepare the Send SGEs. The head iovec, the tail iovec, and each
 * entry in the page list each get an SGE of their own.
 */
static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_sge *sge = sc->sc_sges;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sc->sc_device = rdmab_device(rb);
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr =
				ib_dma_map_page(rdmab_device(rb), *ppages,
						page_base, len, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdmab_device(rb),
						 sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = rdmab_lkey(rb);

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr =
			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
					DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = rdmab_lkey(rb);
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_sendctx_unmap(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_sendctx_unmap(sc);
	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
	return false;
}
/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto err;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	ret = -EIO;
	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
		goto err;
	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
			goto err;
	return 0;

err:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}
/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_recycle(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	frwr_reset(req);
	return ret;
}
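/* Putting it together, an illustrative Call that returns its large
 * result via a single two-segment Write chunk would carry this
 * transport header (one XDR word per cell, HLOO fields elided):
 *
 *	xid | vers | credits | rdma_msg |
 *	0 (no Read list) |
 *	1 | 2 | H1L1O1 | H2L2O2 | 0 (end of Write list) |
 *	0 (no Reply chunk)
 *
 * followed inline by the RPC call message itself.
 */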
/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
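/* For example (hypothetical figures): for a 4096-byte inline reply
 * with a head.iov_len of 128, the first 128 bytes are consumed simply
 * by pointing head.iov_base into the receive buffer, the bytes up to
 * page_len are memcopied into the page list, and any remainder is
 * consumed by redirecting tail.iov_base. Only the page-list bytes
 * count toward the returned fixup_copy_count.
 */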
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}
static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}
/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}
/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}
static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}
static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}
static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}
static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpup(p + 1),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC: %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC: %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}
/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

	/* If the incoming reply terminated a pending RPC, the next
	 * RPC call will post a replacement receive buffer as it is
	 * being marshaled.
	 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}
void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		frwr_unmap_sync(r_xprt, req);

	/* Ensure that any DMA mapped pages associated with
	 * the Send of the RPC Call have been unmapped before
	 * allowing the RPC to complete. This protects argument
	 * memory not controlled by the RPC client from being
	 * re-used before we're done with it.
	 */
	if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		r_xprt->rx_stats.reply_waits_for_send++;
		out_of_line_wait_on_bit(&req->rl_flags,
					RPCRDMA_REQ_F_TX_RESOURCES,
					bit_wait,
					TASK_UNINTERRUPTIBLE);
	}
}
/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	trace_xprtrdma_defer_cmp(rep);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}
/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	if (buf->rb_credits != credits) {
		spin_lock_bh(&xprt->transport_lock);
		buf->rb_credits = credits;
		xprt->cwnd = credits << RPC_CWNDSHIFT;
		spin_unlock_bh(&xprt->transport_lock);
	}

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
	queue_work(buf->rb_completion_wq, &rep->rr_work);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}