// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);

/* Wait for outstanding transport work to finish.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(ia->ri_id->qp);
	drain_workqueue(buf->rb_completion_wq);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(ia->ri_id->qp);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context.
 */
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_event(r_xprt, event);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);

	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep.rep_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_post_recvs(r_xprt, false);
	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_recv_buffer_put(rep);
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}
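
/* Worked example (illustrative only; the encode/decode helpers live in
 * xprt_rdma.h): each inline size travels in the private message as a
 * single byte that encodes (size in KB) - 1. Thus:
 *
 *	rpcrdma_encode_buffer_size(4096) == (4096 >> 10) - 1 == 3
 *	rpcrdma_decode_buffer_size(3)    == (3 + 1) << 10    == 4096
 *
 * so a peer advertising cp_send_size == 3 caps this transport's
 * inline_rsize at 4096 bytes.
 */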

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		wake_up_all(&ep->rep_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto disconnected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto disconnected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto disconnected;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->rep_connected = -ECONNABORTED;
disconnected:
		xprt_force_disconnect(xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC:       %s: %s:%s on %s/frwr: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_device->name, rdma_event_msg(event->event));
	return 0;
}
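
/* Summary (illustrative, derived from the handler above) of how CM
 * events map onto ep->rep_connected:
 *
 *	RDMA_CM_EVENT_ESTABLISHED	 1
 *	RDMA_CM_EVENT_DEVICE_REMOVAL	-ENODEV
 *	RDMA_CM_EVENT_CONNECT_ERROR	-ENOTCONN
 *	RDMA_CM_EVENT_UNREACHABLE	-ENETUNREACH
 *	RDMA_CM_EVENT_REJECTED		-ECONNREFUSED (-EAGAIN if stale)
 *	RDMA_CM_EVENT_DISCONNECTED	-ECONNABORTED
 *
 * Waiters on rep_connect_wait decide how to proceed based on the
 * sign of this value.
 */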

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia))
			break;
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		rpcrdma_xprt_drain(r_xprt);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = frwr_open(ia, ep, cdata);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	ep->rep_receive_count = 0;

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
			     IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err)
		goto out_destroy;

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;
	xprt_clear_connected(xprt);

	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc)
		goto out;

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/**
 * rpcrdma_ep_disconnect - Disconnect underlying transport
 * @ep: endpoint to disconnect
 * @ia: associated interface adapter
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);
	int rc;

	/* returns without wait if ID is not connected */
	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
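
/* Illustrative sketch (not part of the implementation): the consumer
 * advances rb_sc_head to take a free sendctx; the producer advances
 * rb_sc_tail as Sends complete. One slot is always held in reserve to
 * distinguish "all free" from "all in flight". With rb_sc_last == 3
 * (a four-entry array):
 *
 *	head == tail:		no Sends outstanding, all sendctxs free
 *	next(head) == tail:	queue "empty", ->send_request backs off
 */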

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			return -ENOMEM;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
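
/* Example (illustrative): with rb_sc_last == 3, successive calls
 * yield 0 -> 1 -> 2 -> 3 -> 0. The index is simply incremented until
 * it reaches rb_sc_last, then wraps to slot 0, avoiding a modulus.
 */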

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
		smp_mb__after_atomic();
		xprt_write_space(&sc->sc_xprt->rx_xprt);
	}
}
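
/* Memory-ordering sketch (illustrative): the release store of
 * rb_sc_tail above is paired with the consumer's READ_ONCE() in
 * rpcrdma_sendctx_get_locked():
 *
 *	producer (Send completion)	consumer (->send_request)
 *	rpcrdma_sendctx_unmap(sc);
 *	smp_store_release(&tail, t);	next_head == READ_ONCE(tail)?
 *
 * so a consumer that sees the new tail value does not dequeue a
 * sendctx whose SGEs are still being unmapped.
 */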

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < ia->ri_max_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = frwr_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);
	trace_xprtrdma_createmrs(r_xprt, count);

	xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (req == NULL)
		goto out1;

	rb = rpcrdma_regbuf_alloc(RPCRDMA_HDRBUF_SIZE, DMA_TO_DEVICE, flags);
	if (!rb)
		goto out2;
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));

	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
	if (!req->rl_sendbuf)
		goto out3;

	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
	if (!req->rl_recvbuf)
		goto out4;

	req->rl_buffer = buffer;
	INIT_LIST_HEAD(&req->rl_registered);
	spin_lock(&buffer->rb_lock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_lock);
	return req;

out4:
	/* free the whole regbuf, not just the struct, to avoid
	 * leaking the data buffer on this error path
	 */
	rpcrdma_regbuf_free(req->rl_sendbuf);
out3:
	rpcrdma_regbuf_free(req->rl_rdmabuf);
out2:
	kfree(req);
out1:
	return NULL;
}

static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (!rep->rr_rdmabuf)
		goto out_free;
	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return true;

out_free:
	kfree(rep);
out:
	return false;
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);

	rc = -ENOMEM;
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE,
					 GFP_KERNEL);
		if (!req)
			goto out;
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	buf->rb_credits = 1;
	INIT_LIST_HEAD(&buf->rb_recv_bufs);

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	buf->rb_completion_wq = alloc_workqueue("rpcrdma-%s",
						WQ_MEM_RECLAIM | WQ_HIGHPRI,
						0,
			r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
	if (!buf->rb_completion_wq) {
		rc = -ENOMEM;
		goto out;
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
	kfree(rep);
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * This function assumes that the caller prevents concurrent device
 * unload and transport tear-down.
 */
void
rpcrdma_req_destroy(struct rpcrdma_req *req)
{
	list_del(&req->rl_all);

	rpcrdma_regbuf_free(req->rl_recvbuf);
	rpcrdma_regbuf_free(req->rl_sendbuf);
	rpcrdma_regbuf_free(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);

		/* Ensure MW is not on any rl_registered list */
		if (!list_empty(&mr->mr_list))
			list_del(&mr->mr_list);

		frwr_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior ib_drain_qp :
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	if (buf->rb_completion_wq) {
		destroy_workqueue(buf->rb_completion_wq);
		buf->rb_completion_wq = NULL;
	}

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = list_first_entry(&buf->rb_recv_bufs,
				       struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		rpcrdma_rep_destroy(rep);
	}

	while (!list_empty(&buf->rb_send_bufs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
		list_del(&req->rl_list);
		rpcrdma_req_destroy(req);
	}

	rpcrdma_mrs_destroy(buf);
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	trace_xprtrdma_nomrs(r_xprt);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @req: object to return
 *
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		if (!rep->rr_temp) {
			list_add(&rep->rr_list, &buffers->rb_recv_bufs);
			rep = NULL;
		}
	}
	spin_unlock(&buffers->rb_lock);
	if (rep)
		rpcrdma_rep_destroy(rep);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	if (!rep->rr_temp) {
		spin_lock(&buffers->rb_lock);
		list_add(&rep->rr_list, &buffers->rb_recv_bufs);
		spin_unlock(&buffers->rb_lock);
	} else {
		rpcrdma_rep_destroy(rep);
	}
}

/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb), flags);
	if (!rb)
		return NULL;
	rb->rg_data = kmalloc(size, flags);
	if (!rb->rg_data) {
		kfree(rb);
		return NULL;
	}

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;
	return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
	void *buf;

	buf = kmalloc(size, flags);
	if (!buf)
		return false;

	rpcrdma_regbuf_dma_unmap(rb);
	kfree(rb->rg_data);

	rb->rg_data = buf;
	rb->rg_iov.length = size;
	return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
					    rdmab_length(rb), rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
		return false;
	}

	rb->rg_device = device;
	rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey;
	return true;
}

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
			    rb->rg_direction);
	rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
	rpcrdma_regbuf_dma_unmap(rb);
	if (rb)
		kfree(rb->rg_data);
	kfree(rb);
}

/**
 * rpcrdma_ep_post - Post WRs to a transport's Send Queue
 * @ia: transport's device information
 * @ep: transport's RDMA endpoint information
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	int rc;

	if (!ep->rep_send_count ||
	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = frwr_send(ia, req);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}
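
/* Worked example (illustrative): with cdata->max_requests == 128,
 * rep_send_batch == min(RPCRDMA_MAX_SEND_BATCH, 128 >> 2). Only every
 * rep_send_batch'th Send WR (or one flagged RPCRDMA_REQ_F_TX_RESOURCES)
 * sets IB_SEND_SIGNALED, so roughly one Send completion fires per
 * batch instead of per Send, while still bounding the amount of
 * unsignaled work on the Send Queue.
 */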

static void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct ib_recv_wr *wr, *bad_wr;
	int needed, count, rc;

	rc = 0;
	count = 0;
	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (ep->rep_receive_count > needed)
		goto out;
	needed -= ep->rep_receive_count;
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

	wr = NULL;
	while (needed) {
		struct rpcrdma_regbuf *rb;
		struct rpcrdma_rep *rep;

		spin_lock(&buf->rb_lock);
		rep = list_first_entry_or_null(&buf->rb_recv_bufs,
					       struct rpcrdma_rep, rr_list);
		if (likely(rep))
			list_del(&rep->rr_list);
		spin_unlock(&buf->rb_lock);
		if (!rep) {
			if (!rpcrdma_rep_create(r_xprt, temp))
				break;
			continue;
		}

		rb = rep->rr_rdmabuf;
		if (!rpcrdma_regbuf_dma_map(r_xprt, rb)) {
			rpcrdma_recv_buffer_put(rep);
			break;
		}

		trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		++count;
		--needed;
	}
	if (!count)
		goto out;

	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
	if (rc) {
		for (wr = bad_wr; wr; wr = wr->next) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	ep->rep_receive_count += count;
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
}
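
/* Worked example (illustrative): with rb_credits == 32, no backchannel
 * (rb_bc_srv_max_requests == 0), rep_receive_count == 10, and !temp:
 *
 *	needed = 32 - 10 + RPCRDMA_MAX_RECV_BATCH
 *
 * so one ib_post_recv() call chains enough Receive WRs to cover the
 * server's credit grant plus a batch of headroom.
 */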