// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "srq.h"

static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_frag_buf_get_wqe(&srq->fbc, n);
}

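/*
 * Translate a device SRQ event into the corresponding IB event and dispatch
 * it to the consumer's event handler, if one was registered.
 */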
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

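/*
 * Set up the user-space backing of an SRQ: copy and validate the create
 * command from udata, pin the WQE buffer with ib_umem_get(), build the
 * physical-address array and map the user doorbell record.
 */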
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	size_t ucmdlen;
	int err, npages, page_shift, ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type != IB_SRQT_BASIC) {
		err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	in->pas = kvcalloc(ncont, sizeof(*in->pas), GFP_KERNEL);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = uidx;

	return 0;

err_in:
	kvfree(in->pas);
err_umem:
	ib_umem_release(srq->umem);
	return err;
}

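/*
 * Set up a kernel-owned SRQ: allocate the doorbell record and the fragmented
 * WQE buffer, and link every WQE into the free list through next_wqe_index.
 */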
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	struct mlx5_wqe_srq_next_seg *next;
	int err, i;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
				     dev->mdev->priv.numa_node)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}

	mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
		      &srq->fbc);

	srq->head = 0;
	srq->tail = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
	in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_frag_array(&srq->buf, in->pas);

	srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = 0;

	in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);
err_buf:
	mlx5_frag_buf_free(dev->mdev, &srq->buf);
err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}

static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			     struct ib_udata *udata)
{
	mlx5_ib_db_unmap_user(
		rdma_udata_to_drv_context(
			udata,
			struct mlx5_ib_ucontext,
			ibucontext),
		&srq->db);
	ib_umem_release(srq->umem);
}

static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kvfree(srq->wrid);
	mlx5_frag_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}

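/*
 * Common SRQ creation path: validate the requested sizes, round the WQE
 * count and descriptor size up to powers of two, build the buffer through
 * the user or kernel helper, then create the SRQ object in firmware.
 */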
int mlx5_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx5_ib_srq *srq = to_msrq(ib_srq);
	size_t desc_size;
	size_t buf_size;
	int err;
	struct mlx5_srq_attr in = {};
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    max_srq_wqes);
		return -EINVAL;
	}

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
		return -EINVAL;

	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(size_t, 32, desc_size);
	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
		return -EINVAL;

	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	if (buf_size < desc_size)
		return -EINVAL;

	in.type = init_attr->srq_type;

	if (udata)
		err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     udata ? "user" : "kernel", err);
		return err;
	}

	in.log_size = ilog2(srq->msrq.max);
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;

	if (init_attr->srq_type == IB_SRQT_XRC)
		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
	else
		in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;

	if (init_attr->srq_type == IB_SRQT_TM) {
		in.tm_log_list_size =
			ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
		if (in.tm_log_list_size >
		    MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
			mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
			err = -EINVAL;
			goto err_usr_kern_srq;
		}
		in.flags |= MLX5_SRQ_FLAG_RNDV;
	}

	if (ib_srq_has_cq(init_attr->srq_type))
		in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
	else
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;

	in.pd = to_mpd(ib_srq->pd)->pdn;
	in.db_record = srq->db.dma;
	err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
	kvfree(in.pas);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (udata)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return 0;

err_core:
	mlx5_cmd_destroy_srq(dev, &srq->msrq);

err_usr_kern_srq:
	if (udata)
		destroy_srq_user(ib_srq->pd, srq, udata);
	else
		destroy_srq_kernel(dev, srq);

	return err;
}

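/* Only arming the SRQ limit is supported; resizing an SRQ is rejected. */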
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);
		if (ret)
			return ret;
	}

	return 0;
}

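/*
 * Query the SRQ attributes: the limit watermark is read from firmware,
 * the depth and max SGEs come from the driver's own state.
 */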
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_srq_attr *out;
	int ret;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = out->lwm;
	srq_attr->max_wr = srq->msrq.max - 1;
	srq_attr->max_sge = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}

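/*
 * Destroy the SRQ in firmware first, then release either the user doorbell
 * mapping and umem or the kernel buffers, depending on how it was created.
 */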
void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_cmd_destroy_srq(dev, &msrq->msrq);

	if (udata) {
		mlx5_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx5_ib_ucontext,
				ibucontext),
			&msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}
}

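/*
 * Return a completed WQE to the SRQ free list by linking it after the
 * current tail.  Callers run with interrupts already disabled, so a plain
 * spin_lock() is sufficient here.
 */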
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

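/*
 * Post a chain of receive work requests: pull a WQE off the free-list head
 * for each request, fill in its scatter entries, and finally update the
 * doorbell record so the device sees the new WQEs.
 */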
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			/* SRQ is full */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}