// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG
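
/* Changing the #undef above to a #define enables the pr_info() calls
 * in rpcrdma_bc_receive_call() below, which dump the XID and payload
 * of each incoming backchannel call.
 */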

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}
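
/* The rqsts themselves are created lazily: rpcrdma_bc_rqst_get()
 * allocates them as backchannel calls arrive, bounded by the
 * RPCRDMA_BACKWARD_WRS limit enforced there.
 */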

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, ep->rep_inline_send, ep->rep_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}
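
/* Worked example (illustrative; actual values depend on the device
 * and connection settings): with 4096-byte inline send and receive
 * thresholds and a 4096-byte PAGE_SIZE, maxmsg is 4096 and the
 * returned payload ceiling is 4096 - RPCRDMA_HDRLEN_MIN (28) = 4068
 * bytes.
 */

/* Backchannel operations consume both Send and Receive WRs, so only
 * half of the RPCRDMA_BACKWARD_WRS provisioned on the QP are usable
 * as backchannel slots.
 */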
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
	return RPCRDMA_BACKWARD_WRS >> 1;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
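
	/* Build the fixed-size transport header for the reply: seven
	 * XDR words (28 octets) -- XID, RPC/RDMA version, credit
	 * grant, the RDMA_MSG procedure, and three empty chunk lists,
	 * since the reply travels entirely inline.
	 */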
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}
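
/* Typical caller (an assumption; not defined in this file): the
 * generic SUNRPC backchannel code invokes this routine through the
 * transport's bc_send_request method after filling rq_snd_buf,
 * reconnecting and retrying when -ENOTCONN is returned.
 */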

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);
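
		/* Drop the list lock while this rqst is destroyed; the
		 * entry has already been unlinked, so the walk resumes
		 * safely once the lock is retaken below.
		 */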
		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
}
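
/* Return an unused rqst from the transport's backchannel pool, or
 * allocate a fresh one when the pool is empty and fewer than
 * RPCRDMA_BACKWARD_WRS rqsts exist. Returns NULL if the allocation
 * limit has been reached or memory is exhausted.
 */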
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

	size = min_t(size_t, r_xprt->rx_ep.rep_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
	if (!req)
		return NULL;

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;
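
	/* Peek at the start of the received header without consuming
	 * it: p points at the callback XID, and size is the number of
	 * inline bytes remaining in the Receive buffer.
	 */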
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
}