// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        /* Flush Receives, then wait for deferred Reply work
         * to complete.
         */
        ib_drain_rq(ia->ri_id->qp);

        /* Deferred Reply processing might have scheduled
         * local invalidations.
         */
        ib_drain_sq(ia->ri_id->qp);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context.
 */
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;
        struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
                                                   rx_ep);

        trace_xprtrdma_qp_event(r_xprt, event);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_send(sc, wc);
        rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
                                               rr_cqe);
        struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_receive(wc);
        --r_xprt->rx_ep.rep_receive_count;
        if (wc->status != IB_WC_SUCCESS)
                goto out_flushed;

        /* status == SUCCESS means all fields in wc are trustworthy */
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
        rep->rr_wc_flags = wc->wc_flags;
        rep->rr_inv_rkey = wc->ex.invalidate_rkey;

        ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                                   rdmab_addr(rep->rr_rdmabuf),
                                   wc->byte_len, DMA_FROM_DEVICE);

        rpcrdma_reply_handler(rep);
        return;

out_flushed:
        rpcrdma_recv_buffer_put(rep);
}

static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt,
                                      struct rdma_conn_param *param)
{
        const struct rpcrdma_connect_private *pmsg = param->private_data;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        unsigned int rsize, wsize;

        /* Default settings for RPC-over-RDMA Version One */
        r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
        rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
        wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                r_xprt->rx_ia.ri_implicit_roundup = true;
                rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
                wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
        }

        if (rsize < ep->rep_inline_recv)
                ep->rep_inline_recv = rsize;
        if (wsize < ep->rep_inline_send)
                ep->rep_inline_send = wsize;

        rpcrdma_set_max_header_sizes(r_xprt);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rpcrdma_xprt *r_xprt = id->context;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;

        might_sleep();

        trace_xprtrdma_cm_event(r_xprt, event);
        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ia->ri_async_rc = 0;
                complete(&ia->ri_done);
                return 0;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ia->ri_async_rc = -EPROTO;
                complete(&ia->ri_done);
                return 0;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ia->ri_async_rc = -ENETUNREACH;
                complete(&ia->ri_done);
                return 0;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
                pr_info("rpcrdma: removing device %s for %s:%s\n",
                        ia->ri_id->device->name,
                        rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
                set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
                ep->rep_connected = -ENODEV;
                xprt_force_disconnect(xprt);
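                /* Wait for rpcrdma_ia_remove() to finish divesting
                 * this transport's hardware resources; it signals
                 * ri_remove_done when it is safe to continue.
                 */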
                wait_for_completion(&ia->ri_remove_done);

                ia->ri_id = NULL;
                /* Return 1 to ensure the core destroys the id. */
                return 1;
        case RDMA_CM_EVENT_ESTABLISHED:
                ++xprt->connect_cookie;
                ep->rep_connected = 1;
                rpcrdma_update_cm_private(r_xprt, &event->param.conn);
                trace_xprtrdma_inline_thresh(r_xprt);
                wake_up_all(&ep->rep_connect_wait);
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ep->rep_connected = -ENOTCONN;
                goto disconnected;
        case RDMA_CM_EVENT_UNREACHABLE:
                ep->rep_connected = -ENETUNREACH;
                goto disconnected;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
                        rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
                        rdma_reject_msg(id, event->status));
                ep->rep_connected = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
                        ep->rep_connected = -EAGAIN;
                goto disconnected;
        case RDMA_CM_EVENT_DISCONNECTED:
                ep->rep_connected = -ECONNABORTED;
disconnected:
                xprt_force_disconnect(xprt);
                wake_up_all(&ep->rep_connect_wait);
                break;
        default:
                break;
        }

        dprintk("RPC:       %s: %s:%s on %s/frwr: %s\n", __func__,
                rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
                ia->ri_id->device->name, rdma_event_msg(event->event));
        return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
        unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
        struct rdma_cm_id *id;
        int rc;

        init_completion(&ia->ri_done);
        init_completion(&ia->ri_remove_done);

        id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
                            xprt, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id))
                return id;

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL,
                               (struct sockaddr *)&xprt->rx_xprt.addr,
                               RDMA_RESOLVE_TIMEOUT);
        if (rc)
                goto out;
        rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
        if (rc < 0)
                goto out;

        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc)
                goto out;
        rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
        if (rc < 0)
                goto out;
        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        return id;

out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        int rc;

        ia->ri_id = rpcrdma_create_id(xprt, ia);
        if (IS_ERR(ia->ri_id)) {
                rc = PTR_ERR(ia->ri_id);
                goto out_err;
        }

        ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
                goto out_err;
        }

        switch (xprt_rdma_memreg_strategy) {
        case RPCRDMA_FRWR:
                if (frwr_is_supported(ia->ri_id->device))
                        break;
                /*FALLTHROUGH*/
        default:
                pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
                       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
                rc = -EINVAL;
                goto out_err;
        }

        return 0;

out_err:
        rpcrdma_ia_close(ia);
        return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;

        /* This is similar to rpcrdma_ep_destroy, but:
         * - Don't cancel the connect worker.
         * - Don't call rpcrdma_ep_disconnect, which waits
         *   for another conn upcall, which will deadlock.
         * - rdma_disconnect is unneeded, the underlying
         *   connection is already gone.
         */
        if (ia->ri_id->qp) {
                rpcrdma_xprt_drain(r_xprt);
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }
        ib_free_cq(ep->rep_attr.recv_cq);
        ep->rep_attr.recv_cq = NULL;
        ib_free_cq(ep->rep_attr.send_cq);
        ep->rep_attr.send_cq = NULL;

        /* The ULP is responsible for ensuring all DMA
         * mappings and MRs are gone.
         */
        rpcrdma_reps_destroy(buf);
        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
                rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
                rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
        }
        rpcrdma_mrs_destroy(r_xprt);
        ib_dealloc_pd(ia->ri_pd);
        ia->ri_pd = NULL;

        /* Allow waiters to continue */
        complete(&ia->ri_remove_done);

        trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
        if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
                if (ia->ri_id->qp)
                        rdma_destroy_qp(ia->ri_id);
                rdma_destroy_id(ia->ri_id);
        }
        ia->ri_id = NULL;

        /* If the pd is still busy, xprtrdma missed freeing a resource */
        if (ia->ri_pd && !IS_ERR(ia->ri_pd))
                ib_dealloc_pd(ia->ri_pd);
        ia->ri_pd = NULL;
}

/**
 * rpcrdma_ep_create - Create unconnected endpoint
 * @r_xprt: transport to instantiate
 *
 * Returns zero on success, or a negative errno.
 */
int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
        struct ib_cq *sendcq, *recvcq;
        unsigned int max_sge;
        int rc;

        ep->rep_max_requests = xprt_rdma_slot_table_entries;
        ep->rep_inline_send = xprt_rdma_max_inline_write;
        ep->rep_inline_recv = xprt_rdma_max_inline_read;

        max_sge = min_t(unsigned int, ia->ri_id->device->attrs.max_send_sge,
                        RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
                return -ENOMEM;
        }
        ia->ri_max_send_sges = max_sge;

        rc = frwr_open(ia, ep);
        if (rc)
                return rc;

        ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
        ep->rep_attr.qp_context = ep;
        ep->rep_attr.srq = NULL;
        ep->rep_attr.cap.max_send_sge = max_sge;
        ep->rep_attr.cap.max_recv_sge = 1;
        ep->rep_attr.cap.max_inline_data = 0;
        ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->rep_attr.qp_type = IB_QPT_RC;
        ep->rep_attr.port_num = ~0;

        dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->rep_attr.cap.max_send_wr,
                ep->rep_attr.cap.max_recv_wr,
                ep->rep_attr.cap.max_send_sge,
                ep->rep_attr.cap.max_recv_sge);

        ep->rep_send_batch = ep->rep_max_requests >> 3;
        ep->rep_send_count = ep->rep_send_batch;
        init_waitqueue_head(&ep->rep_connect_wait);
        ep->rep_receive_count = 0;

        sendcq = ib_alloc_cq_any(ia->ri_id->device, r_xprt,
                                 ep->rep_attr.cap.max_send_wr + 1,
                                 IB_POLL_WORKQUEUE);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                goto out1;
        }

        recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
                                 ep->rep_attr.cap.max_recv_wr + 1,
                                 IB_POLL_WORKQUEUE);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                goto out2;
        }

        ep->rep_attr.send_cq = sendcq;
        ep->rep_attr.recv_cq = recvcq;

        /* Initialize cma parameters */
        memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

        /* Prepare RDMA-CM private message */
        pmsg->cp_magic = rpcrdma_cmp_magic;
        pmsg->cp_version = RPCRDMA_CMP_VERSION;
        pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
        pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send);
        pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv);
        ep->rep_remote_cma.private_data = pmsg;
        ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

        /* Client offers RDMA Read but does not initiate */
        ep->rep_remote_cma.initiator_depth = 0;
        ep->rep_remote_cma.responder_resources =
                min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom);

        /* Limit transport retries so client can detect server
         * GID changes quickly. RPC layer handles re-establishing
         * transport connection and retransmission.
         */
        ep->rep_remote_cma.retry_count = 6;

        /* RPC-over-RDMA handles its own flow control. In addition,
         * make all RNR NAKs visible so we know that RPC-over-RDMA
         * flow control is working correctly (no NAKs should be seen).
         */
        ep->rep_remote_cma.flow_control = 0;
        ep->rep_remote_cma.rnr_retry_count = 0;

        return 0;

out2:
        ib_free_cq(sendcq);
out1:
        return rc;
}

/**
 * rpcrdma_ep_destroy - Disconnect and destroy endpoint.
 * @r_xprt: transport instance to shut down
 *
 */
void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        if (ia->ri_id && ia->ri_id->qp) {
                rpcrdma_ep_disconnect(ep, ia);
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }

        if (ep->rep_attr.recv_cq)
                ib_free_cq(ep->rep_attr.recv_cq);
        if (ep->rep_attr.send_cq)
                ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers are needed.
 */
static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                                    struct ib_qp_init_attr *qp_init_attr)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        int rc, err;

        trace_xprtrdma_reinsert(r_xprt);

        rc = -EHOSTUNREACH;
        if (rpcrdma_ia_open(r_xprt))
                goto out1;

        rc = -ENOMEM;
        err = rpcrdma_ep_create(r_xprt);
        if (err) {
                pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
                goto out2;
        }
        memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr));

        rc = -ENETUNREACH;
        err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
        if (err) {
                pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
                goto out3;
        }
        return 0;

out3:
        rpcrdma_ep_destroy(r_xprt);
out2:
        rpcrdma_ia_close(ia);
out1:
        return rc;
}

static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt,
                                struct ib_qp_init_attr *qp_init_attr)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rdma_cm_id *id, *old;
        int err, rc;

        rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia);

        rc = -EHOSTUNREACH;
        id = rpcrdma_create_id(r_xprt, ia);
        if (IS_ERR(id))
                goto out;

        /* As long as the new ID points to the same device as the
         * old ID, we can reuse the transport's existing PD and all
         * previously allocated MRs. Also, the same device means
         * the transport's previous DMA mappings are still valid.
         *
         * This is a sanity check only. There should be no way these
         * point to two different devices here.
         */
        old = id;
        rc = -ENETUNREACH;
        if (ia->ri_id->device != id->device) {
                pr_err("rpcrdma: can't reconnect on different device!\n");
                goto out_destroy;
        }

        err = rdma_create_qp(id, ia->ri_pd, qp_init_attr);
        if (err)
                goto out_destroy;

        /* Atomically replace the transport's ID and QP. */
        rc = 0;
        old = ia->ri_id;
        ia->ri_id = id;
        rdma_destroy_qp(old);

out_destroy:
        rdma_destroy_id(old);
out:
        return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct ib_qp_init_attr qp_init_attr;
        int rc;

retry:
        memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr));
        switch (ep->rep_connected) {
        case 0:
                rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr);
                if (rc) {
                        rc = -ENETUNREACH;
                        goto out_noupdate;
                }
                break;
        case -ENODEV:
                rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr);
                if (rc)
                        goto out_noupdate;
                break;
        default:
                rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr);
                if (rc)
                        goto out;
        }

        ep->rep_connected = 0;
        xprt_clear_connected(xprt);

        rpcrdma_reset_cwnd(r_xprt);
        rpcrdma_post_recvs(r_xprt, true);

        rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
        if (rc)
                goto out;

        if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
                xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
        wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
        if (ep->rep_connected <= 0) {
                if (ep->rep_connected == -EAGAIN)
                        goto retry;
                rc = ep->rep_connected;
                goto out;
        }

        rpcrdma_mrs_create(r_xprt);

out:
        if (rc)
                ep->rep_connected = rc;

out_noupdate:
        trace_xprtrdma_connect(r_xprt, rc);
        return rc;
}

/**
 * rpcrdma_ep_disconnect - Disconnect underlying transport
 * @ep: endpoint to disconnect
 * @ia: associated interface adapter
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
                                                   rx_ep);
        int rc;

        /* returns without wait if ID is not connected */
        rc = rdma_disconnect(ia->ri_id);
        if (!rc)
                wait_event_interruptible(ep->rep_connect_wait,
                                                        ep->rep_connected != 1);
        else
                ep->rep_connected = rc;
        trace_xprtrdma_disconnect(r_xprt, rc);

        rpcrdma_xprt_drain(r_xprt);
        rpcrdma_reqs_reset(r_xprt);
        rpcrdma_mrs_destroy(r_xprt);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
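
/* Illustration (not part of the original source): with rb_sc_last == 3
 * the slots form a ring 0 -> 1 -> 2 -> 3 -> 0. rb_sc_head names the
 * slot most recently dequeued by the consumer, and rb_sc_tail the slot
 * most recently enqueued by the producer. Sendctxs available to the
 * consumer sit strictly between head and tail, so the ring reads as
 * empty as soon as next(rb_sc_head) == rb_sc_tail.
 */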

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
        unsigned long i;

        for (i = 0; i <= buf->rb_sc_last; i++)
                kfree(buf->rb_sc_ctxs[i]);
        kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
        struct rpcrdma_sendctx *sc;

        sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges),
                     GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->sc_cqe.done = rpcrdma_wc_send;
        return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long i;

        /* Maximum number of concurrent outstanding Send WRs. Capping
         * the circular queue size stops Send Queue overflow by causing
         * the ->send_request call to fail temporarily before too many
         * Sends are posted.
         */
        i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
        dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
        buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
        if (!buf->rb_sc_ctxs)
                return -ENOMEM;

        buf->rb_sc_last = i - 1;
        for (i = 0; i <= buf->rb_sc_last; i++) {
                sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
                if (!sc)
                        return -ENOMEM;

                buf->rb_sc_ctxs[i] = sc;
        }

        return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
                                          unsigned long item)
{
        return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
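
/* For example (illustrative only): if rb_sc_last == 3, successive
 * calls advance an index 0 -> 1 -> 2 -> 3 and then wrap to 0, using
 * a compare-and-branch instead of a modulus operation.
 */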

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long next_head;

        next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

        if (next_head == READ_ONCE(buf->rb_sc_tail))
                goto out_emptyq;

        /* ORDER: item must be accessed _before_ head is updated */
        sc = buf->rb_sc_ctxs[next_head];

        /* Releasing the lock in the caller acts as a memory
         * barrier that flushes rb_sc_head.
         */
        buf->rb_sc_head = next_head;

        return sc;

out_emptyq:
        /* The queue is "empty" if there have not been enough Send
         * completions recently. This is a sign the Send Queue is
         * backing up. Cause the caller to pause and try again.
         */
        xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
        r_xprt->rx_stats.empty_sendctx_q++;
        return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @r_xprt: controlling transport instance
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        unsigned long next_tail;

        /* Unmap SGEs of previously completed but unsignaled
         * Sends by walking up the queue until @sc is found.
         */
        next_tail = buf->rb_sc_tail;
        do {
                next_tail = rpcrdma_sendctx_next(buf, next_tail);

                /* ORDER: item must be accessed _before_ tail is updated */
                rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

        } while (buf->rb_sc_ctxs[next_tail] != sc);

        /* Paired with the READ_ONCE of rb_sc_tail in
         * rpcrdma_sendctx_get_locked().
         */
        smp_store_release(&buf->rb_sc_tail, next_tail);

        xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        unsigned int count;

        for (count = 0; count < ia->ri_max_segs; count++) {
                struct rpcrdma_mr *mr;
                int rc;

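                /* Allocate with GFP_NOFS so MR allocation cannot
                 * recurse into the filesystem during memory reclaim.
                 */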
                mr = kzalloc(sizeof(*mr), GFP_NOFS);
                if (!mr)
                        break;

                rc = frwr_init_mr(ia, mr);
                if (rc) {
                        kfree(mr);
                        break;
                }

                mr->mr_xprt = r_xprt;

                spin_lock(&buf->rb_lock);
                rpcrdma_mr_push(mr, &buf->rb_mrs);
                list_add(&mr->mr_all, &buf->rb_all_mrs);
                spin_unlock(&buf->rb_lock);
        }

        r_xprt->rx_stats.mrs_allocated += count;
        trace_xprtrdma_createmrs(r_xprt, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
        struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
                                                  rb_refresh_worker);
        struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                   rx_buf);

        rpcrdma_mrs_create(r_xprt);
        xprt_write_space(&r_xprt->rx_xprt);
}

/**
 * rpcrdma_mrs_refresh - Wake the MR refresh worker
 * @r_xprt: controlling transport instance
 *
 */
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;

        /* If there is no underlying device, it's no use to
         * wake the refresh worker.
         */
        if (ep->rep_connected != -ENODEV) {
                /* The work is scheduled on a WQ_MEM_RECLAIM
                 * workqueue in order to prevent MR allocation
                 * from recursing into NFS during direct reclaim.
                 */
                queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
        }
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
                                       gfp_t flags)
{
        struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
        struct rpcrdma_regbuf *rb;
        struct rpcrdma_req *req;
        size_t maxhdrsize;

        req = kzalloc(sizeof(*req), flags);
        if (req == NULL)
                goto out1;

        /* Compute maximum header buffer size in bytes */
        maxhdrsize = rpcrdma_fixed_maxsz + 3 +
                     r_xprt->rx_ia.ri_max_segs * rpcrdma_readchunk_maxsz;
        maxhdrsize *= sizeof(__be32);
        rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
                                  DMA_TO_DEVICE, flags);
        if (!rb)
                goto out2;
        req->rl_rdmabuf = rb;
        xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));

        req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
        if (!req->rl_sendbuf)
                goto out3;

        req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
        if (!req->rl_recvbuf)
                goto out4;

        INIT_LIST_HEAD(&req->rl_free_mrs);
        INIT_LIST_HEAD(&req->rl_registered);
        spin_lock(&buffer->rb_lock);
        list_add(&req->rl_all, &buffer->rb_allreqs);
        spin_unlock(&buffer->rb_lock);
        return req;

out4:
        kfree(req->rl_sendbuf);
out3:
        kfree(req->rl_rdmabuf);
out2:
        kfree(req);
out1:
        return NULL;
}

/**
 * rpcrdma_reqs_reset - Reset all reqs owned by a transport
 * @r_xprt: controlling transport instance
 *
 * ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock; e.g. the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;

        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                /* Credits are valid only for one connection */
                req->rl_slot.rq_cong = 0;
        }
}

static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
                                              bool temp)
{
        struct rpcrdma_rep *rep;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (rep == NULL)
                goto out;

        rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv,
                                               DMA_FROM_DEVICE, GFP_KERNEL);
        if (!rep->rr_rdmabuf)
                goto out_free;

        xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
                     rdmab_length(rep->rr_rdmabuf));
        rep->rr_cqe.done = rpcrdma_wc_receive;
        rep->rr_rxprt = r_xprt;
        rep->rr_recv_wr.next = NULL;
        rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
        rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        rep->rr_recv_wr.num_sge = 1;
        rep->rr_temp = temp;
        return rep;

out_free:
        kfree(rep);
out:
        return NULL;
}

static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
        rpcrdma_regbuf_free(rep->rr_rdmabuf);
        kfree(rep);
}

static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
        struct llist_node *node;

        /* Calls to llist_del_first are required to be serialized */
        node = llist_del_first(&buf->rb_free_reps);
        if (!node)
                return NULL;
        return llist_entry(node, struct rpcrdma_rep, rr_node);
}

static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
                            struct rpcrdma_rep *rep)
{
        if (!rep->rr_temp)
                llist_add(&rep->rr_node, &buf->rb_free_reps);
        else
                rpcrdma_rep_destroy(rep);
}

static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_rep *rep;

        while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
                rpcrdma_rep_destroy(rep);
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        int i, rc;

        buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
        buf->rb_bc_srv_max_requests = 0;
        spin_lock_init(&buf->rb_lock);
        INIT_LIST_HEAD(&buf->rb_mrs);
        INIT_LIST_HEAD(&buf->rb_all_mrs);
        INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);

        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);

        rc = -ENOMEM;
        for (i = 0; i < buf->rb_max_requests; i++) {
                struct rpcrdma_req *req;

                req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
                                         GFP_KERNEL);
                if (!req)
                        goto out;
                list_add(&req->rl_list, &buf->rb_send_bufs);
        }

        init_llist_head(&buf->rb_free_reps);

        rc = rpcrdma_sendctxs_create(r_xprt);
        if (rc)
                goto out;

        return 0;
out:
        rpcrdma_buffer_destroy(buf);
        return rc;
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * Relies on caller holding the transport send lock to protect
 * removing req->rl_all from buf->rb_allreqs safely.
 */
void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
        struct rpcrdma_mr *mr;

        list_del(&req->rl_all);

        while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
                struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;

                spin_lock(&buf->rb_lock);
                list_del(&mr->mr_all);
                spin_unlock(&buf->rb_lock);

                frwr_release_mr(mr);
        }

        rpcrdma_regbuf_free(req->rl_recvbuf);
        rpcrdma_regbuf_free(req->rl_sendbuf);
        rpcrdma_regbuf_free(req->rl_rdmabuf);
        kfree(req);
}

/**
 * rpcrdma_mrs_destroy - Release all of a transport's MRs
 * @r_xprt: controlling transport instance
 *
 * Relies on caller holding the transport send lock to protect
 * removing mr->mr_list from req->rl_free_mrs safely.
 */
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mr *mr;

        cancel_work_sync(&buf->rb_refresh_worker);

        spin_lock(&buf->rb_lock);
        while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
                                              struct rpcrdma_mr,
                                              mr_all)) != NULL) {
                list_del(&mr->mr_list);
                list_del(&mr->mr_all);
                spin_unlock(&buf->rb_lock);

                frwr_release_mr(mr);

                spin_lock(&buf->rb_lock);
        }
        spin_unlock(&buf->rb_lock);
}

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
        rpcrdma_sendctxs_destroy(buf);
        rpcrdma_reps_destroy(buf);

        while (!list_empty(&buf->rb_send_bufs)) {
                struct rpcrdma_req *req;

                req = list_first_entry(&buf->rb_send_bufs,
                                       struct rpcrdma_req, rl_list);
                list_del(&req->rl_list);
                rpcrdma_req_destroy(req);
        }
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mr *mr;

        spin_lock(&buf->rb_lock);
        mr = rpcrdma_mr_pop(&buf->rb_mrs);
        spin_unlock(&buf->rb_lock);
        return mr;
}

/**
 * rpcrdma_mr_put - DMA unmap an MR and release it
 * @mr: MR to release
 *
 */
void rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

        if (mr->mr_dir != DMA_NONE) {
                trace_xprtrdma_mr_unmap(mr);
                ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
                                mr->mr_sg, mr->mr_nents, mr->mr_dir);
                mr->mr_dir = DMA_NONE;
        }

        rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
        struct rpcrdma_req *req;

        spin_lock(&buffers->rb_lock);
        req = list_first_entry_or_null(&buffers->rb_send_bufs,
                                       struct rpcrdma_req, rl_list);
        if (req)
                list_del_init(&req->rl_list);
        spin_unlock(&buffers->rb_lock);
        return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
        if (req->rl_reply)
                rpcrdma_rep_put(buffers, req->rl_reply);
        req->rl_reply = NULL;

        spin_lock(&buffers->rb_lock);
        list_add(&req->rl_list, &buffers->rb_send_bufs);
        spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
 * @rep: rep to release
 *
 * Used after error conditions.
 */
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
        rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}

/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags)
{
        struct rpcrdma_regbuf *rb;

        rb = kmalloc(sizeof(*rb), flags);
        if (!rb)
                return NULL;
        rb->rg_data = kmalloc(size, flags);
        if (!rb->rg_data) {
                kfree(rb);
                return NULL;
        }

        rb->rg_device = NULL;
        rb->rg_direction = direction;
        rb->rg_iov.length = size;
        return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
        void *buf;

        buf = kmalloc(size, flags);
        if (!buf)
                return false;

        rpcrdma_regbuf_dma_unmap(rb);
        kfree(rb->rg_data);

        rb->rg_data = buf;
        rb->rg_iov.length = size;
        return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
                              struct rpcrdma_regbuf *rb)
{
        struct ib_device *device = r_xprt->rx_ia.ri_id->device;

        if (rb->rg_direction == DMA_NONE)
                return false;

        rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
                                            rdmab_length(rb), rb->rg_direction);
        if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
                trace_xprtrdma_dma_maperr(rdmab_addr(rb));
                return false;
        }

        rb->rg_device = device;
        rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey;
        return true;
}

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
        if (!rb)
                return;

        if (!rpcrdma_regbuf_is_mapped(rb))
                return;

        ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
                            rb->rg_direction);
        rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
        rpcrdma_regbuf_dma_unmap(rb);
        if (rb)
                kfree(rb->rg_data);
        kfree(rb);
}

/**
 * rpcrdma_ep_post - Post WRs to a transport's Send Queue
 * @ia: transport's device information
 * @ep: transport's RDMA endpoint information
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
                struct rpcrdma_ep *ep,
                struct rpcrdma_req *req)
{
        struct ib_send_wr *send_wr = &req->rl_wr;
        int rc;

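        /* Signal one Send completion per rep_send_batch Sends to
         * limit completion-handling overhead. A Send is also
         * signaled when @req is still referenced elsewhere
         * (kref > 1).
         */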
        if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
                send_wr->send_flags |= IB_SEND_SIGNALED;
                ep->rep_send_count = ep->rep_send_batch;
        } else {
                send_wr->send_flags &= ~IB_SEND_SIGNALED;
                --ep->rep_send_count;
        }

        rc = frwr_send(ia, req);
        trace_xprtrdma_post_send(req, rc);
        if (rc)
                return -ENOTCONN;
        return 0;
}

/**
 * rpcrdma_post_recvs - Refill the Receive Queue
 * @r_xprt: controlling transport instance
 * @temp: mark Receive buffers to be deleted after use
 *
 */
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct ib_recv_wr *i, *wr, *bad_wr;
        struct rpcrdma_rep *rep;
        int needed, count, rc;

        rc = 0;
        count = 0;

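        /* Post enough Receives to cover the current credit grant
         * plus expected backchannel traffic; when this is not a
         * temporary refill, add batch headroom so Receives are
         * posted less often.
         */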
        needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
        if (likely(ep->rep_receive_count > needed))
                goto out;
        needed -= ep->rep_receive_count;
        if (!temp)
                needed += RPCRDMA_MAX_RECV_BATCH;

        /* fast path: all needed reps can be found on the free list */
        wr = NULL;
        while (needed) {
                rep = rpcrdma_rep_get_locked(buf);
                if (!rep)
                        rep = rpcrdma_rep_create(r_xprt, temp);
                if (!rep)
                        break;

                rep->rr_recv_wr.next = wr;
                wr = &rep->rr_recv_wr;
                --needed;
        }
        if (!wr)
                goto out;

        for (i = wr; i; i = i->next) {
                rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);

                if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
                        goto release_wrs;

                trace_xprtrdma_post_recv(rep);
                ++count;
        }

        rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
                          (const struct ib_recv_wr **)&bad_wr);
out:
        trace_xprtrdma_post_recvs(r_xprt, count, rc);
        if (rc) {
                for (wr = bad_wr; wr;) {
                        struct rpcrdma_rep *rep;

                        rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
                        wr = wr->next;
                        rpcrdma_recv_buffer_put(rep);
                        --count;
                }
        }
        ep->rep_receive_count += count;
        return;

release_wrs:
        for (i = wr; i;) {
                rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
                i = i->next;
                rpcrdma_recv_buffer_put(rep);
        }
}