/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#ifndef SVC_RDMA_H
#define SVC_RDMA_H
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
/* Default and maximum inline threshold sizes */
enum {
	RPCRDMA_DEF_INLINE_THRESH = 4096,
	RPCRDMA_MAX_INLINE_THRESH = 65536
};
/* RPC/RDMA parameters and stats */
extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;

extern atomic_t rdma_stat_recv;
extern atomic_t rdma_stat_read;
extern atomic_t rdma_stat_write;
extern atomic_t rdma_stat_sq_starve;
extern atomic_t rdma_stat_rq_starve;
extern atomic_t rdma_stat_rq_poll;
extern atomic_t rdma_stat_rq_prod;
extern atomic_t rdma_stat_sq_poll;
extern atomic_t rdma_stat_sq_prod;
/*
 * Contexts are built when an RDMA request is created and are a
 * record of the resources that can be recovered when the request
 * completes.
 */
struct svc_rdma_op_ctxt {
	struct list_head list;
	struct svc_rdma_op_ctxt *read_hdr;
	struct svc_rdma_fastreg_mr *frmr;
	struct ib_cqe reg_cqe;
	struct ib_cqe inv_cqe;
	struct svcxprt_rdma *xprt;
	enum dma_data_direction direction;
	unsigned int mapped_sges;
	struct ib_send_wr send_wr;
	struct ib_sge sge[1 + RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE];
	struct page *pages[RPCSVC_MAXPAGES];
};
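/*
 * Illustrative sketch (not part of the original header): one plausible
 * lifecycle for an op context, using the helpers declared later in
 * this file. Error handling and the real work-request setup are
 * elided; 'rdma' is assumed to be the owning struct svcxprt_rdma.
 *
 *	struct svc_rdma_op_ctxt *ctxt;
 *
 *	ctxt = svc_rdma_get_context(rdma);
 *	ctxt->direction = DMA_TO_DEVICE;
 *	// ... DMA-map buffers, fill ctxt->sge[] and ctxt->send_wr ...
 *	// on completion (or error), tear the context back down:
 *	svc_rdma_unmap_dma(ctxt);
 *	svc_rdma_put_context(ctxt, 1);	// non-zero second arg also
 *					// releases ctxt->pages[] (assumed)
 */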
/*
 * NFS requests are mapped on the client side by the chunk lists in
 * the RPCRDMA header. During the fetching of the RPC from the client
 * and the writing of the reply to the client, the memory in the
 * client and the memory in the server must be mapped as contiguous
 * vaddr/len for access by the hardware. These data structures keep
 * these mappings.
 *
 * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the
 * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
 * 'ch' field maps the read-list of the RPCRDMA header to the 'sge'
 * mapping of the reply.
 */
struct svc_rdma_chunk_sge {
	int start;		/* sge no for this chunk */
	int count;		/* sge count for this chunk */
};
struct svc_rdma_fastreg_mr {
	struct scatterlist *sg;
	unsigned long access_flags;
	enum dma_data_direction direction;
	struct list_head frmr_list;
};
struct svc_rdma_req_map {
	struct list_head free;
	struct kvec sge[RPCSVC_MAXPAGES];
	struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
	unsigned long lkey[RPCSVC_MAXPAGES];
};
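/*
 * Illustrative sketch (not part of the original header): how a reply
 * path might run an xdr_buf through a request map with the helpers
 * declared below. 'rdma' and 'xdr' are assumed variables; the meaning
 * of the boolean argument and of the return value of
 * svc_rdma_map_xdr() is an assumption here, not taken from this header.
 *
 *	struct svc_rdma_req_map *vec;
 *	int ret;
 *
 *	vec = svc_rdma_get_req_map(rdma);
 *	ret = svc_rdma_map_xdr(rdma, xdr, vec, false);
 *	// on success, vec->sge[] describes the reply as contiguous
 *	// vaddr/len segments the device can address
 *	svc_rdma_put_req_map(rdma, vec);
 */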
#define RDMACTXT_F_LAST_CTXT	2

#define	SVCRDMA_DEVCAP_FAST_REG		1	/* fast mr registration */
#define	SVCRDMA_DEVCAP_READ_W_INV	2	/* read w/ invalidate */
struct svcxprt_rdma {
	struct svc_xprt      sc_xprt;		/* SVC transport structure */
	struct rdma_cm_id    *sc_cm_id;		/* RDMA connection id */
	struct list_head     sc_accept_q;	/* Conn. waiting accept */
	int		     sc_ord;		/* RDMA read limit */
	int		     sc_max_sge_rd;	/* max sge for read target */
	bool		     sc_snd_w_inv;	/* OK to use Send With Invalidate */

	atomic_t	     sc_sq_avail;	/* SQEs ready to be consumed */
	unsigned int	     sc_sq_depth;	/* Depth of SQ */
	unsigned int	     sc_rq_depth;	/* Depth of RQ */
	__be32		     sc_fc_credits;	/* Forward credits */
	u32		     sc_max_requests;	/* Max requests */
	u32		     sc_max_bc_requests;/* Backward credits */
	int		     sc_max_req_size;	/* Size of each RQ WR buf */

	spinlock_t	     sc_ctxt_lock;
	struct list_head     sc_ctxts;
	spinlock_t	     sc_rw_ctxt_lock;
	struct list_head     sc_rw_ctxts;
	spinlock_t	     sc_map_lock;
	struct list_head     sc_maps;

	struct list_head     sc_rq_dto_q;
	spinlock_t	     sc_rq_dto_lock;
	struct ib_cq	     *sc_rq_cq;
	struct ib_cq	     *sc_sq_cq;
	int		     (*sc_reader)(struct svcxprt_rdma *,
					  struct svc_rqst *,
					  struct svc_rdma_op_ctxt *,
					  int *, u32 *, u32, u32, u64, bool);
	u32		     sc_dev_caps;	/* distilled device caps */
	unsigned int	     sc_frmr_pg_list_len;
	struct list_head     sc_frmr_q;
	spinlock_t	     sc_frmr_q_lock;

	spinlock_t	     sc_lock;		/* transport lock */

	wait_queue_head_t    sc_send_wait;	/* SQ exhaustion waitlist */
	unsigned long	     sc_flags;
	struct list_head     sc_read_complete_q;
	struct work_struct   sc_work;
};
#define RDMAXPRT_CONN_PENDING	3

#define RPCRDMA_LISTEN_BACKLOG	10
/* The default ORD value is based on two outstanding full-size writes with a
 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
#define RPCRDMA_ORD		(64/4)
#define RPCRDMA_MAX_REQUESTS	32
/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
 * current NFSv4.1 implementation supports one backchannel slot.
 */
#define RPCRDMA_MAX_BC_REQUESTS	2

#define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
/* Track DMA maps for this transport and context */
static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
					   struct svc_rdma_op_ctxt *ctxt)
{
	ctxt->mapped_sges++;
}
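/*
 * Illustrative sketch (not part of the original header): a caller that
 * DMA-maps a buffer into ctxt->sge[] would bump the mapping count
 * right after a successful map so that svc_rdma_unmap_dma() later
 * knows how many entries to unmap. 'device', 'page', 'len', 'n' and
 * 'pd_local_dma_lkey' are assumptions for the example.
 *
 *	dma_addr = ib_dma_map_page(device, page, 0, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(device, dma_addr))
 *		goto err;
 *	ctxt->sge[n].addr   = dma_addr;
 *	ctxt->sge[n].length = len;
 *	ctxt->sge[n].lkey   = pd_local_dma_lkey;
 *	svc_rdma_count_mappings(rdma, ctxt);
 */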
/* svc_rdma_backchannel.c */
extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
				    __be32 *rdma_resp,
				    struct xdr_buf *rcvbuf);
/* svc_rdma_marshal.c */
extern int svc_rdma_xdr_decode_req(struct xdr_buf *);
extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
					    __be32, __be64, u32);
extern unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp);
/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);
extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
			       struct svc_rdma_op_ctxt *, int *, u32 *,
			       u32, u32, u64, bool);
extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
				struct svc_rdma_op_ctxt *, int *, u32 *,
				u32, u32, u64, bool);
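/*
 * Illustrative sketch (not part of the original header): the two chunk
 * readers above match the sc_reader signature, so transport setup can
 * pick one per device. Keying the choice off SVCRDMA_DEVCAP_FAST_REG
 * exactly as shown is an assumption here, not taken from this header.
 *
 *	newxprt->sc_reader = rdma_read_chunk_lcl;
 *	if (newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)
 *		newxprt->sc_reader = rdma_read_chunk_frmr;
 */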
/* svc_rdma_rw.c */
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
				     __be32 *wr_ch, struct xdr_buf *xdr);
extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
				     __be32 *rp_ch, bool writelist,
				     struct xdr_buf *xdr);
/* svc_rdma_sendto.c */
extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
			    struct svc_rdma_req_map *, bool);
extern int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
				  struct svc_rdma_op_ctxt *ctxt,
				  __be32 *rdma_resp, unsigned int len);
extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
				 struct svc_rdma_op_ctxt *ctxt,
				 int num_sge, u32 inv_rkey);
extern int svc_rdma_sendto(struct svc_rqst *);
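/*
 * Illustrative sketch (not part of the original header): a plausible
 * ordering of the sendto helpers for a header-only reply. 'rdma_resp'
 * and 'len' are assumed to describe an already-encoded RPC-over-RDMA
 * header; using one SGE and an inv_rkey of 0 (no remote invalidation)
 * is an assumption for this example.
 *
 *	ctxt = svc_rdma_get_context(rdma);
 *	ret = svc_rdma_map_reply_hdr(rdma, ctxt, rdma_resp, len);
 *	if (ret < 0)
 *		goto err;
 *	ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
 */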
/* svc_rdma_transport.c */
extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_write(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
extern struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *);
extern void svc_rdma_put_req_map(struct svcxprt_rdma *,
				 struct svc_rdma_req_map *);
extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
			      struct svc_rdma_fastreg_mr *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
extern struct svc_xprt_class svc_rdma_class;
#ifdef CONFIG_SUNRPC_BACKCHANNEL
extern struct svc_xprt_class svc_rdma_bc_class;
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
/* svc_rdma.c */
extern struct workqueue_struct *svc_rdma_wq;
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);

#endif	/* SVC_RDMA_H */