1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
47 #include <linux/qed/qed_if.h>
50 #include <rdma/qedr-abi.h>
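/* DB_ADDR_SHIFT() converts a PWM doorbell offset (e.g.
 * DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT) into a byte offset within the
 * doorbell BAR; DB_PWM_ADDR_OFFSET_SHIFT presumably encodes the doorbell
 * record granularity.
 */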
53 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
55 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
57 if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
60 *pkey = QEDR_ROCE_PKEY_DEFAULT;
64 int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
67 struct qedr_dev *dev = get_qedr_dev(ibdev);
70 if (!rdma_cap_roce_gid_table(ibdev, port))
73 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
75 memcpy(sgid, &zgid, sizeof(*sgid));
79 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
80 sgid->global.interface_id, sgid->global.subnet_prefix);
85 int qedr_add_gid(struct ib_device *device, u8 port_num,
86 unsigned int index, const union ib_gid *gid,
87 const struct ib_gid_attr *attr, void **context)
89 if (!rdma_cap_roce_gid_table(device, port_num))
92 if (port_num > QEDR_MAX_PORT)
101 int qedr_del_gid(struct ib_device *device, u8 port_num,
102 unsigned int index, void **context)
104 if (!rdma_cap_roce_gid_table(device, port_num))
107 if (port_num > QEDR_MAX_PORT)
116 int qedr_query_device(struct ib_device *ibdev,
117 struct ib_device_attr *attr, struct ib_udata *udata)
119 struct qedr_dev *dev = get_qedr_dev(ibdev);
120 struct qedr_device_attr *qattr = &dev->attr;
122 if (!dev->rdma_ctx) {
124 "qedr_query_device called with invalid params rdma_ctx=%p\n",
129 memset(attr, 0, sizeof(*attr));
131 attr->fw_ver = qattr->fw_ver;
132 attr->sys_image_guid = qattr->sys_image_guid;
133 attr->max_mr_size = qattr->max_mr_size;
134 attr->page_size_cap = qattr->page_size_caps;
135 attr->vendor_id = qattr->vendor_id;
136 attr->vendor_part_id = qattr->vendor_part_id;
137 attr->hw_ver = qattr->hw_ver;
138 attr->max_qp = qattr->max_qp;
139 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
140 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
141 IB_DEVICE_RC_RNR_NAK_GEN |
142 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
144 attr->max_sge = qattr->max_sge;
145 attr->max_sge_rd = qattr->max_sge;
146 attr->max_cq = qattr->max_cq;
147 attr->max_cqe = qattr->max_cqe;
148 attr->max_mr = qattr->max_mr;
149 attr->max_mw = qattr->max_mw;
150 attr->max_pd = qattr->max_pd;
151 attr->atomic_cap = dev->atomic_cap;
152 attr->max_fmr = qattr->max_fmr;
153 attr->max_map_per_fmr = 16;
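/* 1 << (fls(x) - 1) rounds x down to the nearest power of two, e.g. a
 * resource count of 24 is reported as 16.
 */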
154 attr->max_qp_init_rd_atom =
155 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
156 attr->max_qp_rd_atom =
157 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
158 attr->max_qp_init_rd_atom);
160 attr->max_srq = qattr->max_srq;
161 attr->max_srq_sge = qattr->max_srq_sge;
162 attr->max_srq_wr = qattr->max_srq_wr;
164 attr->local_ca_ack_delay = qattr->dev_ack_delay;
165 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
166 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
167 attr->max_ah = qattr->max_ah;
172 #define QEDR_SPEED_SDR (1)
173 #define QEDR_SPEED_DDR (2)
174 #define QEDR_SPEED_QDR (4)
175 #define QEDR_SPEED_FDR10 (8)
176 #define QEDR_SPEED_FDR (16)
177 #define QEDR_SPEED_EDR (32)
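/* These values match the IB active_speed encoding used in ib_port_attr
 * (SDR=1, DDR=2, QDR=4, FDR10=8, FDR=16, EDR=32).
 */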
179 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
184 *ib_speed = QEDR_SPEED_SDR;
185 *ib_width = IB_WIDTH_1X;
188 *ib_speed = QEDR_SPEED_QDR;
189 *ib_width = IB_WIDTH_1X;
193 *ib_speed = QEDR_SPEED_DDR;
194 *ib_width = IB_WIDTH_4X;
198 *ib_speed = QEDR_SPEED_EDR;
199 *ib_width = IB_WIDTH_1X;
203 *ib_speed = QEDR_SPEED_QDR;
204 *ib_width = IB_WIDTH_4X;
208 *ib_speed = QEDR_SPEED_QDR;
209 *ib_width = IB_WIDTH_4X;
213 *ib_speed = QEDR_SPEED_EDR;
214 *ib_width = IB_WIDTH_4X;
219 *ib_speed = QEDR_SPEED_SDR;
220 *ib_width = IB_WIDTH_1X;
224 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
226 struct qedr_dev *dev;
227 struct qed_rdma_port *rdma_port;
229 dev = get_qedr_dev(ibdev);
231 DP_ERR(dev, "invalid_port=0x%x\n", port);
235 if (!dev->rdma_ctx) {
236 DP_ERR(dev, "rdma_ctx is NULL\n");
240 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
241 memset(attr, 0, sizeof(*attr));
243 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
244 attr->state = IB_PORT_ACTIVE;
245 attr->phys_state = 5;
247 attr->state = IB_PORT_DOWN;
248 attr->phys_state = 3;
250 attr->max_mtu = IB_MTU_4096;
251 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
256 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
257 attr->gid_tbl_len = QEDR_MAX_SGID;
258 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
259 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
260 attr->qkey_viol_cntr = 0;
261 get_link_speed_and_width(rdma_port->link_speed,
262 &attr->active_speed, &attr->active_width);
263 attr->max_msg_sz = rdma_port->max_msg_size;
264 attr->max_vl_num = 4;
269 int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
270 struct ib_port_modify *props)
272 struct qedr_dev *dev;
274 dev = get_qedr_dev(ibdev);
276 DP_ERR(dev, "invalid_port=0x%x\n", port);
283 static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
288 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
292 mm->key.phy_addr = phy_addr;
293 /* This function might be called with a length which is not a multiple
294 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
295 * forces this granularity by increasing the requested size if needed.
296 * When qedr_mmap is called, it will search the list with the updated
297 * length as a key. To prevent search failures, the length is rounded up
298 * in advance to PAGE_SIZE.
299 */
300 mm->key.len = roundup(len, PAGE_SIZE);
301 INIT_LIST_HEAD(&mm->entry);
303 mutex_lock(&uctx->mm_list_lock);
304 list_add(&mm->entry, &uctx->mm_head);
305 mutex_unlock(&uctx->mm_list_lock);
307 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
308 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
309 (unsigned long long)mm->key.phy_addr,
310 (unsigned long)mm->key.len, uctx);
315 static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
321 mutex_lock(&uctx->mm_list_lock);
322 list_for_each_entry(mm, &uctx->mm_head, entry) {
323 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
329 mutex_unlock(&uctx->mm_list_lock);
330 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
331 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
332 mm->key.phy_addr, mm->key.len, uctx, found);
337 struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
338 struct ib_udata *udata)
341 struct qedr_ucontext *ctx;
342 struct qedr_alloc_ucontext_resp uresp;
343 struct qedr_dev *dev = get_qedr_dev(ibdev);
344 struct qed_rdma_add_user_out_params oparams;
347 return ERR_PTR(-EFAULT);
349 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
351 return ERR_PTR(-ENOMEM);
353 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
356 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
361 ctx->dpi = oparams.dpi;
362 ctx->dpi_addr = oparams.dpi_addr;
363 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
364 ctx->dpi_size = oparams.dpi_size;
365 INIT_LIST_HEAD(&ctx->mm_head);
366 mutex_init(&ctx->mm_list_lock);
368 memset(&uresp, 0, sizeof(uresp));
370 uresp.db_pa = ctx->dpi_phys_addr;
371 uresp.db_size = ctx->dpi_size;
372 uresp.max_send_wr = dev->attr.max_sqe;
373 uresp.max_recv_wr = dev->attr.max_rqe;
374 uresp.max_srq_wr = dev->attr.max_srq_wr;
375 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
376 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
377 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
378 uresp.max_cqes = QEDR_MAX_CQES;
380 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
386 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
390 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
392 return &ctx->ibucontext;
399 int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
401 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
402 struct qedr_mm *mm, *tmp;
405 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
407 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
409 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
410 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
411 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
412 mm->key.phy_addr, mm->key.len, uctx);
413 list_del(&mm->entry);
421 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
423 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
424 struct qedr_dev *dev = get_qedr_dev(context->device);
425 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
426 u64 unmapped_db = dev->db_phys_addr;
427 unsigned long len = (vma->vm_end - vma->vm_start);
431 DP_DEBUG(dev, QEDR_MSG_INIT,
432 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
433 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
434 if (vma->vm_start & (PAGE_SIZE - 1)) {
435 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
440 found = qedr_search_mmap(ucontext, vm_page, len);
442 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
447 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
449 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
451 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
452 if (vma->vm_flags & VM_READ) {
453 DP_ERR(dev, "Trying to map doorbell bar for read\n");
457 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
459 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
460 PAGE_SIZE, vma->vm_page_prot);
462 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
463 rc = remap_pfn_range(vma, vma->vm_start,
464 vma->vm_pgoff, len, vma->vm_page_prot);
466 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
470 struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata)
473 struct qedr_dev *dev = get_qedr_dev(ibdev);
478 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
479 (udata && context) ? "User Lib" : "Kernel");
481 if (!dev->rdma_ctx) {
482 DP_ERR(dev, "invlaid RDMA context\n");
483 return ERR_PTR(-EINVAL);
486 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
488 return ERR_PTR(-ENOMEM);
490 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
496 if (udata && context) {
497 struct qedr_alloc_pd_uresp uresp;
501 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
503 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
504 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
508 pd->uctx = get_qedr_ucontext(context);
519 int qedr_dealloc_pd(struct ib_pd *ibpd)
521 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
522 struct qedr_pd *pd = get_qedr_pd(ibpd);
525 pr_err("Invalid PD received in dealloc_pd\n");
529 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
530 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
537 static void qedr_free_pbl(struct qedr_dev *dev,
538 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
540 struct pci_dev *pdev = dev->pdev;
543 for (i = 0; i < pbl_info->num_pbls; i++) {
546 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
547 pbl[i].va, pbl[i].pa);
553 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
554 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
556 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
557 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
558 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
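/* With 64KB PBL pages and 8-byte PBEs, a single PBL page holds 8192 PBEs,
 * so a two-layer table can describe up to 8192 * 8192 buffer pages.
 */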
560 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
561 struct qedr_pbl_info *pbl_info,
564 struct pci_dev *pdev = dev->pdev;
565 struct qedr_pbl *pbl_table;
566 dma_addr_t *pbl_main_tbl;
571 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
573 return ERR_PTR(-ENOMEM);
575 for (i = 0; i < pbl_info->num_pbls; i++) {
576 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
581 memset(va, 0, pbl_info->pbl_size);
582 pbl_table[i].va = va;
583 pbl_table[i].pa = pa;
586 /* Two-Layer PBLs, if we have more than one pbl we need to initialize
587 * the first one with physical pointers to all of the rest
588 */
589 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
590 for (i = 0; i < pbl_info->num_pbls - 1; i++)
591 pbl_main_tbl[i] = pbl_table[i + 1].pa;
596 for (i--; i >= 0; i--)
597 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
598 pbl_table[i].va, pbl_table[i].pa);
600 qedr_free_pbl(dev, pbl_info, pbl_table);
602 return ERR_PTR(-ENOMEM);
605 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
606 struct qedr_pbl_info *pbl_info,
607 u32 num_pbes, int two_layer_capable)
613 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
614 if (num_pbes > MAX_PBES_TWO_LAYER) {
615 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
620 /* calculate required pbl page size */
621 pbl_size = MIN_FW_PBL_PAGE_SIZE;
622 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
623 NUM_PBES_ON_PAGE(pbl_size);
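/* Grow the PBL page size until a two-level table (one layer-0 page of
 * pointers, each referencing a layer-1 page of PBEs) can cover num_pbes.
 */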
625 while (pbl_capacity < num_pbes) {
626 pbl_size *= 2;
627 pbl_capacity = pbl_size / sizeof(u64);
628 pbl_capacity = pbl_capacity * pbl_capacity;
631 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
632 num_pbls++; /* One more for layer 0 (it points to the pbls) */
633 pbl_info->two_layered = true;
635 /* One layered PBL */
637 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
638 roundup_pow_of_two((num_pbes * sizeof(u64))));
639 pbl_info->two_layered = false;
642 pbl_info->num_pbls = num_pbls;
643 pbl_info->pbl_size = pbl_size;
644 pbl_info->num_pbes = num_pbes;
646 DP_DEBUG(dev, QEDR_MSG_MR,
647 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
648 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
653 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
654 struct qedr_pbl *pbl,
655 struct qedr_pbl_info *pbl_info)
657 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
658 struct qedr_pbl *pbl_tbl;
659 struct scatterlist *sg;
664 if (!pbl_info->num_pbes)
667 /* If we have a two layered pbl, the first pbl points to the rest
668 * of the pbls and the first entry lies in the second pbl of the table
669 */
670 if (pbl_info->two_layered)
675 pbe = (struct regpair *)pbl_tbl->va;
677 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
683 shift = ilog2(umem->page_size);
685 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
686 pages = sg_dma_len(sg) >> shift;
687 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
688 /* store the page address in pbe */
689 pbe->lo = cpu_to_le32(sg_dma_address(sg) +
690 umem->page_size * pg_cnt);
691 addr = upper_32_bits(sg_dma_address(sg) +
692 umem->page_size * pg_cnt);
693 pbe->hi = cpu_to_le32(addr);
698 if (total_num_pbes == pbl_info->num_pbes)
701 /* If the given pbl is full storing the pbes,
702 * move to the next pbl.
703 */
704 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
706 pbe = (struct regpair *)pbl_tbl->va;
713 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
714 struct qedr_cq *cq, struct ib_udata *udata)
716 struct qedr_create_cq_uresp uresp;
719 memset(&uresp, 0, sizeof(uresp));
721 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
722 uresp.icid = cq->icid;
724 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
726 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
731 static void consume_cqe(struct qedr_cq *cq)
733 if (cq->latest_cqe == cq->toggle_cqe)
734 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
736 cq->latest_cqe = qed_chain_consume(&cq->pbl);
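/* Round the requested CQE count up so the chain fills whole pages.
 * Example, assuming 32-byte CQEs and 4KB pages: a request for 100 entries
 * gives (100 + 1) * 32 = 3232 bytes, aligned up to 4096, i.e. 128 entries.
 */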
739 static inline int qedr_align_cq_entries(int entries)
741 u64 size, aligned_size;
743 /* We allocate an extra entry that we don't report to the FW. */
744 size = (entries + 1) * QEDR_CQE_SIZE;
745 aligned_size = ALIGN(size, PAGE_SIZE);
747 return aligned_size / QEDR_CQE_SIZE;
750 static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
751 struct qedr_dev *dev,
752 struct qedr_userq *q,
753 u64 buf_addr, size_t buf_len,
754 int access, int dmasync)
759 q->buf_addr = buf_addr;
760 q->buf_len = buf_len;
761 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
762 if (IS_ERR(q->umem)) {
763 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
765 return PTR_ERR(q->umem);
768 page_cnt = ib_umem_page_count(q->umem);
769 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
773 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
774 if (IS_ERR_OR_NULL(q->pbl_tbl))
777 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
782 ib_umem_release(q->umem);
787 static inline void qedr_init_cq_params(struct qedr_cq *cq,
788 struct qedr_ucontext *ctx,
789 struct qedr_dev *dev, int vector,
790 int chain_entries, int page_cnt,
792 struct qed_rdma_create_cq_in_params
795 memset(params, 0, sizeof(*params));
796 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
797 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
798 params->cnq_id = vector;
799 params->cq_size = chain_entries - 1;
800 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
801 params->pbl_num_pages = page_cnt;
802 params->pbl_ptr = pbl_ptr;
803 params->pbl_two_level = 0;
806 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
808 /* Flush data before signalling doorbell */
810 cq->db.data.agg_flags = flags;
811 cq->db.data.value = cpu_to_le32(cons);
812 writeq(cq->db.raw, cq->db_addr);
814 /* Make sure write would stick */
818 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
820 struct qedr_cq *cq = get_qedr_cq(ibcq);
821 unsigned long sflags;
823 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
826 spin_lock_irqsave(&cq->cq_lock, sflags);
830 if (flags & IB_CQ_SOLICITED)
831 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
833 if (flags & IB_CQ_NEXT_COMP)
834 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
836 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
838 spin_unlock_irqrestore(&cq->cq_lock, sflags);
843 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
844 const struct ib_cq_init_attr *attr,
845 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
847 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
848 struct qed_rdma_destroy_cq_out_params destroy_oparams;
849 struct qed_rdma_destroy_cq_in_params destroy_iparams;
850 struct qedr_dev *dev = get_qedr_dev(ibdev);
851 struct qed_rdma_create_cq_in_params params;
852 struct qedr_create_cq_ureq ureq;
853 int vector = attr->comp_vector;
854 int entries = attr->cqe;
862 DP_DEBUG(dev, QEDR_MSG_INIT,
863 "create_cq: called from %s. entries=%d, vector=%d\n",
864 udata ? "User Lib" : "Kernel", entries, vector);
866 if (entries > QEDR_MAX_CQES) {
868 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
869 entries, QEDR_MAX_CQES);
870 return ERR_PTR(-EINVAL);
873 chain_entries = qedr_align_cq_entries(entries);
874 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
876 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
878 return ERR_PTR(-ENOMEM);
881 memset(&ureq, 0, sizeof(ureq));
882 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
884 "create cq: problem copying data from user space\n");
890 "create cq: cannot create a cq with 0 entries\n");
894 cq->cq_type = QEDR_CQ_TYPE_USER;
896 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
897 ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
901 pbl_ptr = cq->q.pbl_tbl->pa;
902 page_cnt = cq->q.pbl_info.num_pbes;
904 cq->ibcq.cqe = chain_entries;
906 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
908 rc = dev->ops->common->chain_alloc(dev->cdev,
909 QED_CHAIN_USE_TO_CONSUME,
911 QED_CHAIN_CNT_TYPE_U32,
913 sizeof(union rdma_cqe),
918 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
919 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
920 cq->ibcq.cqe = cq->pbl.capacity;
923 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
926 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
931 cq->sig = QEDR_CQ_MAGIC_NUMBER;
932 spin_lock_init(&cq->cq_lock);
935 rc = qedr_copy_cq_uresp(dev, cq, udata);
939 /* Generate doorbell address. */
940 cq->db_addr = dev->db_addr +
941 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
942 cq->db.data.icid = cq->icid;
943 cq->db.data.params = DB_AGG_CMD_SET <<
944 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
946 /* point to the very last element, passing it we will toggle */
947 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
948 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
949 cq->latest_cqe = NULL;
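/* The toggle bit flips each time the CQE chain wraps (see consume_cqe());
 * comparing a CQE's toggle bit against pbl_toggle lets the driver tell
 * which entries the FW has already written.
 */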
951 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
954 DP_DEBUG(dev, QEDR_MSG_CQ,
955 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
956 cq->icid, cq, params.cq_size);
961 destroy_iparams.icid = cq->icid;
962 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
966 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
968 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
971 ib_umem_release(cq->q.umem);
974 return ERR_PTR(-EINVAL);
977 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
979 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
980 struct qedr_cq *cq = get_qedr_cq(ibcq);
982 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
987 int qedr_destroy_cq(struct ib_cq *ibcq)
989 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
990 struct qed_rdma_destroy_cq_out_params oparams;
991 struct qed_rdma_destroy_cq_in_params iparams;
992 struct qedr_cq *cq = get_qedr_cq(ibcq);
994 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
996 /* GSI CQs are handled by the driver, so they don't exist in the FW */
997 if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
1000 iparams.icid = cq->icid;
1001 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
1005 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1008 if (ibcq->uobject && ibcq->uobject->context) {
1009 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1010 ib_umem_release(cq->q.umem);
1018 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1019 struct ib_qp_attr *attr,
1021 struct qed_rdma_modify_qp_in_params
1024 enum rdma_network_type nw_type;
1025 struct ib_gid_attr gid_attr;
1031 rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
1032 attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
1036 if (!memcmp(&gid, &zgid, sizeof(gid)))
1039 if (gid_attr.ndev) {
1040 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1042 dev_put(gid_attr.ndev);
1043 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1045 case RDMA_NETWORK_IPV6:
1046 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1047 sizeof(qp_params->sgid));
1048 memcpy(&qp_params->dgid.bytes[0],
1049 &attr->ah_attr.grh.dgid,
1050 sizeof(qp_params->dgid));
1051 qp_params->roce_mode = ROCE_V2_IPV6;
1052 SET_FIELD(qp_params->modify_flags,
1053 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1055 case RDMA_NETWORK_IB:
1056 memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1057 sizeof(qp_params->sgid));
1058 memcpy(&qp_params->dgid.bytes[0],
1059 &attr->ah_attr.grh.dgid,
1060 sizeof(qp_params->dgid));
1061 qp_params->roce_mode = ROCE_V1;
1063 case RDMA_NETWORK_IPV4:
1064 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1065 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1066 ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1067 qp_params->sgid.ipv4_addr = ipv4_addr;
1069 qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
1070 qp_params->dgid.ipv4_addr = ipv4_addr;
1071 SET_FIELD(qp_params->modify_flags,
1072 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1073 qp_params->roce_mode = ROCE_V2_IPV4;
1078 for (i = 0; i < 4; i++) {
1079 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1080 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
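/* rdma_vlan_dev_vlan_id() returns an out-of-range value (>= VLAN_CFI_MASK)
 * when the GID's netdev carries no VLAN tag, so clear the id in that case.
 */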
1083 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1084 qp_params->vlan_id = 0;
1089 static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1091 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1092 ib_umem_release(qp->usq.umem);
1095 static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1097 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1098 ib_umem_release(qp->urq.umem);
1101 static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1103 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1104 kfree(qp->wqe_wr_id);
1107 static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1109 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1110 kfree(qp->rqe_wr_id);
1113 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1114 struct ib_qp_init_attr *attrs)
1116 struct qedr_device_attr *qattr = &dev->attr;
1118 /* QP0... attrs->qp_type == IB_QPT_GSI */
1119 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1120 DP_DEBUG(dev, QEDR_MSG_QP,
1121 "create qp: unsupported qp type=0x%x requested\n",
1126 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1128 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1129 attrs->cap.max_send_wr, qattr->max_sqe);
1133 if (attrs->cap.max_inline_data > qattr->max_inline) {
1135 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1136 attrs->cap.max_inline_data, qattr->max_inline);
1140 if (attrs->cap.max_send_sge > qattr->max_sge) {
1142 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1143 attrs->cap.max_send_sge, qattr->max_sge);
1147 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1149 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1150 attrs->cap.max_recv_sge, qattr->max_sge);
1154 /* Unprivileged user space cannot create special QP */
1155 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1157 "create qp: userspace can't create special QPs of type=0x%x\n",
1165 static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1168 uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1169 uresp->rq_icid = qp->icid;
1172 static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1175 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1176 uresp->sq_icid = qp->icid + 1;
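/* The FW appears to assign each QP a pair of consecutive icids: qp->icid
 * for the RQ and qp->icid + 1 for the SQ; the kernel doorbell helpers
 * below follow the same convention.
 */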
1179 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1180 struct qedr_qp *qp, struct ib_udata *udata)
1182 struct qedr_create_qp_uresp uresp;
1185 memset(&uresp, 0, sizeof(uresp));
1186 qedr_copy_sq_uresp(&uresp, qp);
1187 qedr_copy_rq_uresp(&uresp, qp);
1189 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1190 uresp.qp_id = qp->qp_id;
1192 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1195 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1201 static void qedr_set_qp_init_params(struct qedr_dev *dev,
1204 struct ib_qp_init_attr *attrs)
1208 spin_lock_init(&qp->q_lock);
1210 qp->qp_type = attrs->qp_type;
1211 qp->max_inline_data = attrs->cap.max_inline_data;
1212 qp->sq.max_sges = attrs->cap.max_send_sge;
1213 qp->state = QED_ROCE_QP_STATE_RESET;
1214 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1215 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1216 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1219 DP_DEBUG(dev, QEDR_MSG_QP,
1220 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1221 pd->pd_id, qp->qp_type, qp->max_inline_data,
1222 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1223 DP_DEBUG(dev, QEDR_MSG_QP,
1224 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1225 qp->sq.max_sges, qp->sq_cq->icid);
1226 qp->rq.max_sges = attrs->cap.max_recv_sge;
1227 DP_DEBUG(dev, QEDR_MSG_QP,
1228 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1229 qp->rq.max_sges, qp->rq_cq->icid);
1233 qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
1234 struct qedr_create_qp_ureq *ureq)
1236 /* QP handle to be written in CQE */
1237 params->qp_handle_lo = ureq->qp_handle_lo;
1238 params->qp_handle_hi = ureq->qp_handle_hi;
1242 qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1244 qp->sq.db = dev->db_addr +
1245 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1246 qp->sq.db_data.data.icid = qp->icid + 1;
1250 qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1252 qp->rq.db = dev->db_addr +
1253 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1254 qp->rq.db_data.data.icid = qp->icid;
1258 qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
1259 struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
1261 /* Allocate driver internal RQ array */
1262 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1267 DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
1273 qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
1275 struct ib_qp_init_attr *attrs,
1276 struct qed_rdma_create_qp_in_params *params)
1280 /* Allocate driver internal SQ array */
1281 temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
1282 temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
1284 /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
1285 qp->sq.max_wr = (u16)temp_max_wr;
1286 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1291 DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
1293 /* QP handle to be written in CQE */
1294 params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
1295 params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
1300 static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
1302 struct ib_qp_init_attr *attrs)
1304 u32 n_sq_elems, n_sq_entries;
1307 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1308 * the ring. The ring should allow at least a single WR, even if the
1309 * user requested none, due to allocation issues.
1310 */
1311 n_sq_entries = attrs->cap.max_send_wr;
1312 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1313 n_sq_entries = max_t(u32, n_sq_entries, 1);
1314 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1315 rc = dev->ops->common->chain_alloc(dev->cdev,
1316 QED_CHAIN_USE_TO_PRODUCE,
1318 QED_CHAIN_CNT_TYPE_U32,
1320 QEDR_SQE_ELEMENT_SIZE,
1323 DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
1327 DP_DEBUG(dev, QEDR_MSG_SQ,
1328 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1329 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
1330 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
1334 static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
1336 struct ib_qp_init_attr *attrs)
1338 u32 n_rq_elems, n_rq_entries;
1341 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1342 * the ring. The ring should allow at least a single WR, even if the
1343 * user requested none, due to allocation issues.
1344 */
1345 n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
1346 n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1347 rc = dev->ops->common->chain_alloc(dev->cdev,
1348 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1350 QED_CHAIN_CNT_TYPE_U32,
1352 QEDR_RQE_ELEMENT_SIZE,
1356 DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
1360 DP_DEBUG(dev, QEDR_MSG_RQ,
1361 "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1362 qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
1363 n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
1365 /* n_rq_entries < u16 so the casting is safe */
1366 qp->rq.max_wr = (u16)n_rq_entries;
1372 qedr_init_qp_in_params_sq(struct qedr_dev *dev,
1375 struct ib_qp_init_attr *attrs,
1376 struct ib_udata *udata,
1377 struct qed_rdma_create_qp_in_params *params)
1379 /* QP handle to be written in an async event */
1380 params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
1381 params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
1383 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1384 params->fmr_and_reserved_lkey = !udata;
1385 params->pd = pd->pd_id;
1386 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1387 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1388 params->max_sq_sges = 0;
1389 params->stats_queue = 0;
1392 params->sq_num_pages = qp->usq.pbl_info.num_pbes;
1393 params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1395 params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1396 params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1401 qedr_init_qp_in_params_rq(struct qedr_qp *qp,
1402 struct ib_qp_init_attr *attrs,
1403 struct ib_udata *udata,
1404 struct qed_rdma_create_qp_in_params *params)
1406 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1408 params->use_srq = false;
1411 params->rq_num_pages = qp->urq.pbl_info.num_pbes;
1412 params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1414 params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1415 params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1419 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1421 DP_DEBUG(dev, QEDR_MSG_QP,
1422 "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
1423 qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
1427 static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
1428 struct qedr_dev *dev,
1430 struct qedr_create_qp_ureq *ureq)
1434 /* SQ - read access only (0), dma sync not required (0) */
1435 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
1436 ureq->sq_len, 0, 0);
1440 /* RQ - read access only (0), dma sync not required (0) */
1441 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
1442 ureq->rq_len, 0, 0);
1445 qedr_cleanup_user_sq(dev, qp);
1450 qedr_init_kernel_qp(struct qedr_dev *dev,
1452 struct ib_qp_init_attr *attrs,
1453 struct qed_rdma_create_qp_in_params *params)
1457 rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
1459 DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
1463 rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
1465 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1466 DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
1470 rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
1472 qedr_cleanup_kernel_sq(dev, qp);
1473 DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
1477 rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
1479 DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
1480 qedr_cleanup_kernel_sq(dev, qp);
1481 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1488 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1489 struct ib_qp_init_attr *attrs,
1490 struct ib_udata *udata)
1492 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1493 struct qed_rdma_create_qp_out_params out_params;
1494 struct qed_rdma_create_qp_in_params in_params;
1495 struct qedr_pd *pd = get_qedr_pd(ibpd);
1496 struct ib_ucontext *ib_ctx = NULL;
1497 struct qedr_ucontext *ctx = NULL;
1498 struct qedr_create_qp_ureq ureq;
1503 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1504 udata ? "user library" : "kernel", pd);
1506 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1511 return ERR_PTR(-EINVAL);
1513 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1515 return ERR_PTR(-ENOMEM);
1517 DP_DEBUG(dev, QEDR_MSG_QP,
1518 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1519 get_qedr_cq(attrs->send_cq),
1520 get_qedr_cq(attrs->send_cq)->icid,
1521 get_qedr_cq(attrs->recv_cq),
1522 get_qedr_cq(attrs->recv_cq)->icid);
1524 qedr_set_qp_init_params(dev, qp, pd, attrs);
1526 if (attrs->qp_type == IB_QPT_GSI) {
1529 "create qp: unexpected udata when creating GSI QP\n");
1532 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1538 memset(&in_params, 0, sizeof(in_params));
1541 if (!(udata && ibpd->uobject && ibpd->uobject->context))
1544 ib_ctx = ibpd->uobject->context;
1545 ctx = get_qedr_ucontext(ib_ctx);
1547 memset(&ureq, 0, sizeof(ureq));
1548 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1550 "create qp: problem copying data from user space\n");
1554 rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
1558 qedr_init_qp_user_params(&in_params, &ureq);
1560 rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
1565 qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
1566 qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
1568 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1569 &in_params, &out_params);
1574 qp->qp_id = out_params.qp_id;
1575 qp->icid = out_params.icid;
1576 qp->ibqp.qp_num = qp->qp_id;
1579 rc = qedr_copy_qp_uresp(dev, qp, udata);
1583 qedr_qp_user_print(dev, qp);
1585 qedr_init_qp_kernel_doorbell_sq(dev, qp);
1586 qedr_init_qp_kernel_doorbell_rq(dev, qp);
1589 DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
1590 udata ? "user" : "kernel", qp);
1595 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1597 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1600 qedr_cleanup_user_sq(dev, qp);
1601 qedr_cleanup_user_rq(dev, qp);
1603 qedr_cleanup_kernel_sq(dev, qp);
1604 qedr_cleanup_kernel_rq(dev, qp);
1610 return ERR_PTR(-EFAULT);
1613 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1616 case QED_ROCE_QP_STATE_RESET:
1617 return IB_QPS_RESET;
1618 case QED_ROCE_QP_STATE_INIT:
1620 case QED_ROCE_QP_STATE_RTR:
1622 case QED_ROCE_QP_STATE_RTS:
1624 case QED_ROCE_QP_STATE_SQD:
1626 case QED_ROCE_QP_STATE_ERR:
1628 case QED_ROCE_QP_STATE_SQE:
1634 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1635 enum ib_qp_state qp_state)
1639 return QED_ROCE_QP_STATE_RESET;
1641 return QED_ROCE_QP_STATE_INIT;
1643 return QED_ROCE_QP_STATE_RTR;
1645 return QED_ROCE_QP_STATE_RTS;
1647 return QED_ROCE_QP_STATE_SQD;
1649 return QED_ROCE_QP_STATE_ERR;
1651 return QED_ROCE_QP_STATE_ERR;
1655 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1657 qed_chain_reset(&qph->pbl);
1661 qph->db_data.data.value = cpu_to_le16(0);
1664 static int qedr_update_qp_state(struct qedr_dev *dev,
1666 enum qed_roce_qp_state new_state)
1670 if (new_state == qp->state)
1673 switch (qp->state) {
1674 case QED_ROCE_QP_STATE_RESET:
1675 switch (new_state) {
1676 case QED_ROCE_QP_STATE_INIT:
1677 qp->prev_wqe_size = 0;
1678 qedr_reset_qp_hwq_info(&qp->sq);
1679 qedr_reset_qp_hwq_info(&qp->rq);
1686 case QED_ROCE_QP_STATE_INIT:
1687 switch (new_state) {
1688 case QED_ROCE_QP_STATE_RTR:
1689 /* Update doorbell (in case post_recv was
1690 * done before move to RTR)
1691 */
1693 writel(qp->rq.db_data.raw, qp->rq.db);
1694 /* Make sure write takes effect */
1697 case QED_ROCE_QP_STATE_ERR:
1700 /* Invalid state change. */
1705 case QED_ROCE_QP_STATE_RTR:
1707 switch (new_state) {
1708 case QED_ROCE_QP_STATE_RTS:
1710 case QED_ROCE_QP_STATE_ERR:
1713 /* Invalid state change. */
1718 case QED_ROCE_QP_STATE_RTS:
1720 switch (new_state) {
1721 case QED_ROCE_QP_STATE_SQD:
1723 case QED_ROCE_QP_STATE_ERR:
1726 /* Invalid state change. */
1731 case QED_ROCE_QP_STATE_SQD:
1733 switch (new_state) {
1734 case QED_ROCE_QP_STATE_RTS:
1735 case QED_ROCE_QP_STATE_ERR:
1738 /* Invalid state change. */
1743 case QED_ROCE_QP_STATE_ERR:
1745 switch (new_state) {
1746 case QED_ROCE_QP_STATE_RESET:
1747 if ((qp->rq.prod != qp->rq.cons) ||
1748 (qp->sq.prod != qp->sq.cons)) {
1750 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1751 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1769 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1770 int attr_mask, struct ib_udata *udata)
1772 struct qedr_qp *qp = get_qedr_qp(ibqp);
1773 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1774 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1775 enum ib_qp_state old_qp_state, new_qp_state;
1778 DP_DEBUG(dev, QEDR_MSG_QP,
1779 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1782 old_qp_state = qedr_get_ibqp_state(qp->state);
1783 if (attr_mask & IB_QP_STATE)
1784 new_qp_state = attr->qp_state;
1786 new_qp_state = old_qp_state;
1788 if (!ib_modify_qp_is_ok
1789 (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
1790 IB_LINK_LAYER_ETHERNET)) {
1792 "modify qp: invalid attribute mask=0x%x specified for\n"
1793 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1794 attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1800 /* Translate the masks... */
1801 if (attr_mask & IB_QP_STATE) {
1802 SET_FIELD(qp_params.modify_flags,
1803 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1804 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1807 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1808 qp_params.sqd_async = true;
1810 if (attr_mask & IB_QP_PKEY_INDEX) {
1811 SET_FIELD(qp_params.modify_flags,
1812 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1813 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1818 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1821 if (attr_mask & IB_QP_QKEY)
1822 qp->qkey = attr->qkey;
1824 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1825 SET_FIELD(qp_params.modify_flags,
1826 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1827 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1828 IB_ACCESS_REMOTE_READ;
1829 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1830 IB_ACCESS_REMOTE_WRITE;
1831 qp_params.incoming_atomic_en = attr->qp_access_flags &
1832 IB_ACCESS_REMOTE_ATOMIC;
1835 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1836 if (attr_mask & IB_QP_PATH_MTU) {
1837 if (attr->path_mtu < IB_MTU_256 ||
1838 attr->path_mtu > IB_MTU_4096) {
1839 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1843 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1844 ib_mtu_enum_to_int(iboe_get_mtu
1850 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1851 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1854 SET_FIELD(qp_params.modify_flags,
1855 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1857 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1858 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1859 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1861 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1863 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1866 "modify qp: problems with GID index %d (rc=%d)\n",
1867 attr->ah_attr.grh.sgid_index, rc);
1871 rc = qedr_get_dmac(dev, &attr->ah_attr,
1872 qp_params.remote_mac_addr);
1876 qp_params.use_local_mac = true;
1877 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1879 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1880 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1881 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1882 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1883 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1884 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1885 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1886 qp_params.remote_mac_addr);
1888 qp_params.mtu = qp->mtu;
1889 qp_params.lb_indication = false;
1892 if (!qp_params.mtu) {
1893 /* Stay with current MTU */
1895 qp_params.mtu = qp->mtu;
1898 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1901 if (attr_mask & IB_QP_TIMEOUT) {
1902 SET_FIELD(qp_params.modify_flags,
1903 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1905 qp_params.ack_timeout = attr->timeout;
1906 if (attr->timeout) {
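/* IB encodes the local ACK timeout as 4.096us * 2^timeout, i.e.
 * 4096 * 2^timeout nanoseconds, so dividing by 1000 twice converts it to
 * the millisecond units the FW expects.
 */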
1909 temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1910 /* FW requires [msec] */
1911 qp_params.ack_timeout = temp;
1914 qp_params.ack_timeout = 0;
1917 if (attr_mask & IB_QP_RETRY_CNT) {
1918 SET_FIELD(qp_params.modify_flags,
1919 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1920 qp_params.retry_cnt = attr->retry_cnt;
1923 if (attr_mask & IB_QP_RNR_RETRY) {
1924 SET_FIELD(qp_params.modify_flags,
1925 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1926 qp_params.rnr_retry_cnt = attr->rnr_retry;
1929 if (attr_mask & IB_QP_RQ_PSN) {
1930 SET_FIELD(qp_params.modify_flags,
1931 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1932 qp_params.rq_psn = attr->rq_psn;
1933 qp->rq_psn = attr->rq_psn;
1936 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1937 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1940 "unsupported max_rd_atomic=%d, supported=%d\n",
1941 attr->max_rd_atomic,
1942 dev->attr.max_qp_req_rd_atomic_resc);
1946 SET_FIELD(qp_params.modify_flags,
1947 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1948 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1951 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1952 SET_FIELD(qp_params.modify_flags,
1953 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1954 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1957 if (attr_mask & IB_QP_SQ_PSN) {
1958 SET_FIELD(qp_params.modify_flags,
1959 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1960 qp_params.sq_psn = attr->sq_psn;
1961 qp->sq_psn = attr->sq_psn;
1964 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1965 if (attr->max_dest_rd_atomic >
1966 dev->attr.max_qp_resp_rd_atomic_resc) {
1968 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1969 attr->max_dest_rd_atomic,
1970 dev->attr.max_qp_resp_rd_atomic_resc);
1976 SET_FIELD(qp_params.modify_flags,
1977 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1978 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1981 if (attr_mask & IB_QP_DEST_QPN) {
1982 SET_FIELD(qp_params.modify_flags,
1983 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1985 qp_params.dest_qp = attr->dest_qp_num;
1986 qp->dest_qp_num = attr->dest_qp_num;
1989 if (qp->qp_type != IB_QPT_GSI)
1990 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1991 qp->qed_qp, &qp_params);
1993 if (attr_mask & IB_QP_STATE) {
1994 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
1995 rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
1996 qp->state = qp_params.new_state;
2003 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2005 int ib_qp_acc_flags = 0;
2007 if (params->incoming_rdma_write_en)
2008 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2009 if (params->incoming_rdma_read_en)
2010 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2011 if (params->incoming_atomic_en)
2012 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2013 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2014 return ib_qp_acc_flags;
2017 int qedr_query_qp(struct ib_qp *ibqp,
2018 struct ib_qp_attr *qp_attr,
2019 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2021 struct qed_rdma_query_qp_out_params params;
2022 struct qedr_qp *qp = get_qedr_qp(ibqp);
2023 struct qedr_dev *dev = qp->dev;
2026 memset(&params, 0, sizeof(params));
2028 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2032 memset(qp_attr, 0, sizeof(*qp_attr));
2033 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2035 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2036 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2037 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2038 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2039 qp_attr->rq_psn = params.rq_psn;
2040 qp_attr->sq_psn = params.sq_psn;
2041 qp_attr->dest_qp_num = params.dest_qp;
2043 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2045 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2046 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2047 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2048 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2049 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2050 qp_init_attr->cap = qp_attr->cap;
2052 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
2053 sizeof(qp_attr->ah_attr.grh.dgid.raw));
2055 qp_attr->ah_attr.grh.flow_label = params.flow_label;
2056 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
2057 qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
2058 qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
2060 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
2061 qp_attr->ah_attr.port_num = 1;
2062 qp_attr->ah_attr.sl = 0;
2063 qp_attr->timeout = params.timeout;
2064 qp_attr->rnr_retry = params.rnr_retry;
2065 qp_attr->retry_cnt = params.retry_cnt;
2066 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2067 qp_attr->pkey_index = params.pkey_index;
2068 qp_attr->port_num = 1;
2069 qp_attr->ah_attr.src_path_bits = 0;
2070 qp_attr->ah_attr.static_rate = 0;
2071 qp_attr->alt_pkey_index = 0;
2072 qp_attr->alt_port_num = 0;
2073 qp_attr->alt_timeout = 0;
2074 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2076 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2077 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2078 qp_attr->max_rd_atomic = params.max_rd_atomic;
2079 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2081 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2082 qp_attr->cap.max_inline_data);
2088 int qedr_destroy_qp(struct ib_qp *ibqp)
2090 struct qedr_qp *qp = get_qedr_qp(ibqp);
2091 struct qedr_dev *dev = qp->dev;
2092 struct ib_qp_attr attr;
2096 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2099 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2100 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2101 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2103 attr.qp_state = IB_QPS_ERR;
2104 attr_mask |= IB_QP_STATE;
2106 /* Change the QP state to ERROR */
2107 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2110 if (qp->qp_type != IB_QPT_GSI) {
2111 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2115 qedr_destroy_gsi_qp(dev);
2118 if (ibqp->uobject && ibqp->uobject->context) {
2119 qedr_cleanup_user_sq(dev, qp);
2120 qedr_cleanup_user_rq(dev, qp);
2122 qedr_cleanup_kernel_sq(dev, qp);
2123 qedr_cleanup_kernel_rq(dev, qp);
2131 struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
2132 struct ib_udata *udata)
2136 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2138 return ERR_PTR(-ENOMEM);
2145 int qedr_destroy_ah(struct ib_ah *ibah)
2147 struct qedr_ah *ah = get_qedr_ah(ibah);
2153 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2155 struct qedr_pbl *pbl, *tmp;
2157 if (info->pbl_table)
2158 list_add_tail(&info->pbl_table->list_entry,
2159 &info->free_pbl_list);
2161 if (!list_empty(&info->inuse_pbl_list))
2162 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2164 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2165 list_del(&pbl->list_entry);
2166 qedr_free_pbl(dev, &info->pbl_info, pbl);
2170 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2171 size_t page_list_len, bool two_layered)
2173 struct qedr_pbl *tmp;
2176 INIT_LIST_HEAD(&info->free_pbl_list);
2177 INIT_LIST_HEAD(&info->inuse_pbl_list);
2179 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2180 page_list_len, two_layered);
2184 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2185 if (!info->pbl_table) {
2190 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2191 &info->pbl_table->pa);
2193 /* In the usual case we use 2 PBLs, so we add one to the free
2194 * list and allocate another one.
2195 */
2196 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2198 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2202 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2204 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2208 free_mr_info(dev, info);
2213 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2214 u64 usr_addr, int acc, struct ib_udata *udata)
2216 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2221 pd = get_qedr_pd(ibpd);
2222 DP_DEBUG(dev, QEDR_MSG_MR,
2223 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2224 pd->pd_id, start, len, usr_addr, acc);
2226 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2227 return ERR_PTR(-EINVAL);
2229 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2233 mr->type = QEDR_MR_USER;
2235 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2236 if (IS_ERR(mr->umem)) {
2241 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2245 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2246 &mr->info.pbl_info);
2248 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2250 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2254 /* Index only, 18 bit long, lkey = itid << 8 | key */
2255 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2257 mr->hw_mr.pd = pd->pd_id;
2258 mr->hw_mr.local_read = 1;
2259 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2260 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2261 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2262 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2263 mr->hw_mr.mw_bind = false;
2264 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2265 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2266 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2267 mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2268 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2269 mr->hw_mr.length = len;
2270 mr->hw_mr.vaddr = usr_addr;
2271 mr->hw_mr.zbva = false;
2272 mr->hw_mr.phy_mr = false;
2273 mr->hw_mr.dma_mr = false;
2275 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2277 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2281 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2282 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2283 mr->hw_mr.remote_atomic)
2284 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2286 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2291 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2293 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2299 int qedr_dereg_mr(struct ib_mr *ib_mr)
2301 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2302 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2305 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2309 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2311 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2312 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2314 /* it could be user registered memory. */
2316 ib_umem_release(mr->umem);
2323 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2324 int max_page_list_len)
2326 struct qedr_pd *pd = get_qedr_pd(ibpd);
2327 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2331 DP_DEBUG(dev, QEDR_MSG_MR,
2332 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2335 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2340 mr->type = QEDR_MR_FRMR;
2342 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2346 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2348 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2352 /* Index only, 18 bit long, lkey = itid << 8 | key */
2353 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2355 mr->hw_mr.pd = pd->pd_id;
2356 mr->hw_mr.local_read = 1;
2357 mr->hw_mr.local_write = 0;
2358 mr->hw_mr.remote_read = 0;
2359 mr->hw_mr.remote_write = 0;
2360 mr->hw_mr.remote_atomic = 0;
2361 mr->hw_mr.mw_bind = false;
2362 mr->hw_mr.pbl_ptr = 0;
2363 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2364 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2366 mr->hw_mr.length = 0;
2367 mr->hw_mr.vaddr = 0;
2368 mr->hw_mr.zbva = false;
2369 mr->hw_mr.phy_mr = true;
2370 mr->hw_mr.dma_mr = false;
2372 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2374 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2378 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2379 mr->ibmr.rkey = mr->ibmr.lkey;
2381 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2385 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2391 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2392 enum ib_mr_type mr_type, u32 max_num_sg)
2394 struct qedr_dev *dev;
2397 if (mr_type != IB_MR_TYPE_MEM_REG)
2398 return ERR_PTR(-EINVAL);
2400 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2403 return ERR_PTR(-EINVAL);
2410 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2412 struct qedr_mr *mr = get_qedr_mr(ibmr);
2413 struct qedr_pbl *pbl_table;
2414 struct regpair *pbe;
2417 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2418 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2422 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2425 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2426 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2427 pbe = (struct regpair *)pbl_table->va;
2428 pbe += mr->npages % pbes_in_page;
2429 pbe->lo = cpu_to_le32((u32)addr);
2430 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
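/* Recycle PBLs whose fast-reg work has completed: move them from the in-use
 * list back to the free list so later registrations can reuse them.
 */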
2437 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2439 int work = info->completed - info->completed_handled - 1;
2441 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2442 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2443 struct qedr_pbl *pbl;
2445 /* Free all the page lists that can be freed (i.e. all the ones
2446 * that were invalidated), under the assumption that if an FMR
2447 * completed successfully, any invalidate operation posted before
2448 * it has completed as well.
2450 pbl = list_first_entry(&info->inuse_pbl_list,
2451 struct qedr_pbl, list_entry);
2452 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
2453 info->completed_handled++;
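/* Map an SG list onto a fast-reg MR: recycle any completed PBLs first, then
 * let ib_sg_to_pages() fill the PBL through qedr_set_page().
 */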
2457 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2458 int sg_nents, unsigned int *sg_offset)
2460 struct qedr_mr *mr = get_qedr_mr(ibmr);
2464 handle_completed_mrs(mr->dev, &mr->info);
2465 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2468 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2470 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2471 struct qedr_pd *pd = get_qedr_pd(ibpd);
2475 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2477 return ERR_PTR(-ENOMEM);
2479 mr->type = QEDR_MR_DMA;
2481 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2483 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2487 /* index only, 18 bit long, lkey = itid << 8 | key */
2488 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2489 mr->hw_mr.pd = pd->pd_id;
2490 mr->hw_mr.local_read = 1;
2491 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2492 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2493 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2494 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2495 mr->hw_mr.dma_mr = true;
2497 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2499 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2503 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2504 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2505 mr->hw_mr.remote_atomic)
2506 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2508 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2512 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2518 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2520 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2523 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2527 for (i = 0; i < num_sge; i++)
2528 len += sg_list[i].length;
2533 static void swap_wqe_data64(u64 *p)
2537 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2538 *p = cpu_to_be64(cpu_to_le64(*p));
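/* Copy the WR payload straight into the SQ WQE (inline data). The data is
 * packed across as many SQ chain elements as needed; each fully written
 * element is byte-swapped with swap_wqe_data64(), which is seemingly the
 * layout the HW expects for inline data.
 */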
2541 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2542 struct qedr_qp *qp, u8 *wqe_size,
2543 struct ib_send_wr *wr,
2544 struct ib_send_wr **bad_wr, u8 *bits,
2547 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2548 char *seg_prt, *wqe;
2551 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2552 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2566 /* Copy data inline */
2567 for (i = 0; i < wr->num_sge; i++) {
2568 u32 len = wr->sg_list[i].length;
2569 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2574 /* New segment required */
2576 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2578 seg_siz = sizeof(struct rdma_sq_common_wqe);
2582 /* Calculate currently allowed length */
2583 cur = min_t(u32, len, seg_siz);
2584 memcpy(seg_prt, src, cur);
2586 /* Update segment variables */
2590 /* Update sge variables */
2594 /* Swap fully-completed segments */
2596 swap_wqe_data64((u64 *)wqe);
2600 /* Swap the last, not fully completed segment */
2602 swap_wqe_data64((u64 *)wqe);
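/* Helpers for building RQ/SRQ entries: addresses, lengths and keys are
 * written in little-endian, as the HW descriptors require.
 */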
2607 #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2609 DMA_REGPAIR_LE(sge->addr, vaddr); \
2610 (sge)->length = cpu_to_le32(vlength); \
2611 (sge)->flags = cpu_to_le32(vflags); \
2614 #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2616 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2617 (hdr)->num_sges = num_sge; \
2620 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2622 DMA_REGPAIR_LE(sge->addr, vaddr); \
2623 (sge)->length = cpu_to_le32(vlength); \
2624 (sge)->l_key = cpu_to_le32(vlkey); \
2627 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2628 struct ib_send_wr *wr)
2633 for (i = 0; i < wr->num_sge; i++) {
2634 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2636 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2637 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2638 sge->length = cpu_to_le32(wr->sg_list[i].length);
2639 data_size += wr->sg_list[i].length;
2643 *wqe_size += wr->num_sge;
2648 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2650 struct rdma_sq_rdma_wqe_1st *rwqe,
2651 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2652 struct ib_send_wr *wr,
2653 struct ib_send_wr **bad_wr)
2655 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2656 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2658 if (wr->send_flags & IB_SEND_INLINE &&
2659 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2660 wr->opcode == IB_WR_RDMA_WRITE)) {
2663 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2664 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2665 bad_wr, &rwqe->flags, flags);
2668 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2671 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2673 struct rdma_sq_send_wqe_1st *swqe,
2674 struct rdma_sq_send_wqe_2st *swqe2,
2675 struct ib_send_wr *wr,
2676 struct ib_send_wr **bad_wr)
2678 memset(swqe2, 0, sizeof(*swqe2));
2679 if (wr->send_flags & IB_SEND_INLINE) {
2682 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2683 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2684 bad_wr, &swqe->flags, flags);
2687 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2690 static int qedr_prepare_reg(struct qedr_qp *qp,
2691 struct rdma_sq_fmr_wqe_1st *fwqe1,
2692 struct ib_reg_wr *wr)
2694 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2695 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2697 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2698 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2699 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2700 fwqe1->l_key = wr->key;
2702 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2703 !!(wr->access & IB_ACCESS_REMOTE_READ));
2704 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2705 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2706 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2707 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2708 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2709 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2710 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2711 fwqe2->fmr_ctrl = 0;
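	/* The page size is encoded as log2 relative to 4KB (ilog2(4096) == 12) */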
2713 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2714 ilog2(mr->ibmr.page_size) - 12);
2716 fwqe2->length_hi = 0;
2717 fwqe2->length_lo = mr->ibmr.length;
2718 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2719 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2721 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2726 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2729 case IB_WR_RDMA_WRITE:
2730 case IB_WR_RDMA_WRITE_WITH_IMM:
2731 return IB_WC_RDMA_WRITE;
2732 case IB_WR_SEND_WITH_IMM:
2734 case IB_WR_SEND_WITH_INV:
2736 case IB_WR_RDMA_READ:
2737 return IB_WC_RDMA_READ;
2738 case IB_WR_ATOMIC_CMP_AND_SWP:
2739 return IB_WC_COMP_SWAP;
2740 case IB_WR_ATOMIC_FETCH_AND_ADD:
2741 return IB_WC_FETCH_ADD;
2743 return IB_WC_REG_MR;
2744 case IB_WR_LOCAL_INV:
2745 return IB_WC_LOCAL_INV;
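/* A WR can be posted only if the SQ ring, the WR itself and the SQ PBL all
 * have room; each failure mode is logged once per QP via err_bitmap.
 */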
2751 static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2753 int wq_is_full, err_wr, pbl_is_full;
2754 struct qedr_dev *dev = qp->dev;
2756 /* prevent SQ overflow and/or processing of a bad WR */
2757 err_wr = wr->num_sge > qp->sq.max_sges;
2758 wq_is_full = qedr_wq_is_full(&qp->sq);
2759 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2760 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2761 if (wq_is_full || err_wr || pbl_is_full) {
2762 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2764 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2766 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2769 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2771 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2773 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2777 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2779 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2781 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
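/* Build a single SQ WQE for one work request. The caller (qedr_post_send)
 * holds qp->q_lock, advances the SW producer and rings the doorbell.
 */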
2788 static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2789 struct ib_send_wr **bad_wr)
2791 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2792 struct qedr_qp *qp = get_qedr_qp(ibqp);
2793 struct rdma_sq_atomic_wqe_1st *awqe1;
2794 struct rdma_sq_atomic_wqe_2nd *awqe2;
2795 struct rdma_sq_atomic_wqe_3rd *awqe3;
2796 struct rdma_sq_send_wqe_2st *swqe2;
2797 struct rdma_sq_local_inv_wqe *iwqe;
2798 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2799 struct rdma_sq_send_wqe_1st *swqe;
2800 struct rdma_sq_rdma_wqe_1st *rwqe;
2801 struct rdma_sq_fmr_wqe_1st *fwqe1;
2802 struct rdma_sq_common_wqe *wqe;
2807 if (!qedr_can_post_send(qp, wr)) {
2812 wqe = qed_chain_produce(&qp->sq.pbl);
2813 qp->wqe_wr_id[qp->sq.prod].signaled =
2814 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2817 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2818 !!(wr->send_flags & IB_SEND_SOLICITED));
2819 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2820 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2821 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2822 !!(wr->send_flags & IB_SEND_FENCE));
2823 wqe->prev_wqe_size = qp->prev_wqe_size;
2825 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2827 switch (wr->opcode) {
2828 case IB_WR_SEND_WITH_IMM:
2829 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2830 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2832 swqe2 = qed_chain_produce(&qp->sq.pbl);
2834 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2835 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2837 swqe->length = cpu_to_le32(length);
2838 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2839 qp->prev_wqe_size = swqe->wqe_size;
2840 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2843 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2844 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2847 swqe2 = qed_chain_produce(&qp->sq.pbl);
2848 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2850 swqe->length = cpu_to_le32(length);
2851 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2852 qp->prev_wqe_size = swqe->wqe_size;
2853 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2855 case IB_WR_SEND_WITH_INV:
2856 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2857 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2858 swqe2 = qed_chain_produce(&qp->sq.pbl);
2860 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2861 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2863 swqe->length = cpu_to_le32(length);
2864 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2865 qp->prev_wqe_size = swqe->wqe_size;
2866 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2869 case IB_WR_RDMA_WRITE_WITH_IMM:
2870 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2871 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2874 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2875 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2876 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2878 rwqe->length = cpu_to_le32(length);
2879 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2880 qp->prev_wqe_size = rwqe->wqe_size;
2881 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2883 case IB_WR_RDMA_WRITE:
2884 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2885 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2888 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2889 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2891 rwqe->length = cpu_to_le32(length);
2892 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2893 qp->prev_wqe_size = rwqe->wqe_size;
2894 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2896 case IB_WR_RDMA_READ_WITH_INV:
2898 "RDMA READ WITH INVALIDATE not supported\n");
2903 case IB_WR_RDMA_READ:
2904 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2905 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2908 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2909 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2911 rwqe->length = cpu_to_le32(length);
2912 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2913 qp->prev_wqe_size = rwqe->wqe_size;
2914 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2917 case IB_WR_ATOMIC_CMP_AND_SWP:
2918 case IB_WR_ATOMIC_FETCH_AND_ADD:
2919 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2920 awqe1->wqe_size = 4;
2922 awqe2 = qed_chain_produce(&qp->sq.pbl);
2923 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2924 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2926 awqe3 = qed_chain_produce(&qp->sq.pbl);
2928 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2929 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2930 DMA_REGPAIR_LE(awqe3->swap_data,
2931 atomic_wr(wr)->compare_add);
2933 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2934 DMA_REGPAIR_LE(awqe3->swap_data,
2935 atomic_wr(wr)->swap);
2936 DMA_REGPAIR_LE(awqe3->cmp_data,
2937 atomic_wr(wr)->compare_add);
2940 qedr_prepare_sq_sges(qp, NULL, wr);
2942 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2943 qp->prev_wqe_size = awqe1->wqe_size;
2946 case IB_WR_LOCAL_INV:
2947 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2950 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2951 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2952 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2953 qp->prev_wqe_size = iwqe->wqe_size;
2956 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2957 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2958 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2959 fwqe1->wqe_size = 2;
2961 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2963 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2968 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2969 qp->prev_wqe_size = fwqe1->wqe_size;
2972 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2981 /* Restore prod to its position before
2982 * this WR was processed
2984 value = le16_to_cpu(qp->sq.db_data.data.value);
2985 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2987 /* Restore prev_wqe_size */
2988 qp->prev_wqe_size = wqe->prev_wqe_size;
2990 DP_ERR(dev, "POST SEND FAILED\n");
2996 int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2997 struct ib_send_wr **bad_wr)
2999 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3000 struct qedr_qp *qp = get_qedr_qp(ibqp);
3001 unsigned long flags;
3006 if (qp->qp_type == IB_QPT_GSI)
3007 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3009 spin_lock_irqsave(&qp->q_lock, flags);
3011 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3012 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3013 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3014 spin_unlock_irqrestore(&qp->q_lock, flags);
3016 DP_DEBUG(dev, QEDR_MSG_CQ,
3017 "QP in wrong state! QP icid=0x%x state %d\n",
3018 qp->icid, qp->state);
3023 rc = __qedr_post_send(ibqp, wr, bad_wr);
3027 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3029 qedr_inc_sw_prod(&qp->sq);
3031 qp->sq.db_data.data.value++;
3037 * If there was a failure in the first WR then the doorbell is rung in
3038 * vain. However, this is not harmful (as long as the producer value is
3039 * unchanged). For performance reasons we avoid checking for this
3040 * redundant doorbell.
3043 writel(qp->sq.db_data.raw, qp->sq.db);
3045 /* Make sure write sticks */
3048 spin_unlock_irqrestore(&qp->q_lock, flags);
3053 int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3054 struct ib_recv_wr **bad_wr)
3056 struct qedr_qp *qp = get_qedr_qp(ibqp);
3057 struct qedr_dev *dev = qp->dev;
3058 unsigned long flags;
3061 if (qp->qp_type == IB_QPT_GSI)
3062 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3064 spin_lock_irqsave(&qp->q_lock, flags);
3066 if (qp->state == QED_ROCE_QP_STATE_RESET) {
3067 spin_unlock_irqrestore(&qp->q_lock, flags);
3075 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3076 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3077 wr->num_sge > qp->rq.max_sges) {
3078 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3079 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3080 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3086 for (i = 0; i < wr->num_sge; i++) {
3088 struct rdma_rq_sge *rqe =
3089 qed_chain_produce(&qp->rq.pbl);
3091 /* The first SGE must include the number
3092 * of SGEs in the list
3095 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3098 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3099 wr->sg_list[i].lkey);
3101 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3102 wr->sg_list[i].length, flags);
3105 /* Special case of no SGEs: FW requires between 1 and 4 SGEs, so in
3106 * this case we need to post one SGE with length zero. This is needed
3107 * because an RDMA write with immediate still consumes an RQ entry.
3111 struct rdma_rq_sge *rqe =
3112 qed_chain_produce(&qp->rq.pbl);
3114 /* The first SGE must include the number
3115 * of SGEs in the list
3117 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3118 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3120 RQ_SGE_SET(rqe, 0, 0, flags);
3124 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3125 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3127 qedr_inc_sw_prod(&qp->rq);
3129 /* Flush all the writes before signalling doorbell */
3132 qp->rq.db_data.data.value++;
3134 writel(qp->rq.db_data.raw, qp->rq.db);
3136 /* Make sure write sticks */
3142 spin_unlock_irqrestore(&qp->q_lock, flags);
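/* A CQE is owned by SW when its toggle bit matches the CQ's expected phase;
 * the bit flips each time the CQE chain wraps around.
 */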
3147 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3149 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3151 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3155 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3157 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3160 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3161 resp_cqe->qp_handle.lo,
3166 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3168 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3170 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3173 /* Return latest CQE (needs processing) */
3174 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3176 return cq->latest_cqe;
3179 /* For FMR we need to increment the 'completed' counter used by the FMR
3180 * algorithm that decides whether a PBL can be freed or not.
3181 * This must be done whether or not the work request was signaled. For that
3182 * reason we call this function from the condition that checks whether a WR
3183 * should be skipped, to make sure we don't miss it (possibly this FMR
3184 * operation was not signaled).
3186 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3188 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3189 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
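/* Walk SQ completions up to hw_cons, emitting at most num_entries work
 * completions; unsignaled WRs are consumed silently unless 'force' is set.
 */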
3192 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3193 struct qedr_cq *cq, int num_entries,
3194 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3199 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3200 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3201 qedr_chk_if_fmr(qp);
3207 wc->status = status;
3210 wc->src_qp = qp->id;
3213 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3214 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3216 switch (wc->opcode) {
3217 case IB_WC_RDMA_WRITE:
3218 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3220 case IB_WC_COMP_SWAP:
3221 case IB_WC_FETCH_ADD:
3225 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3235 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3236 qed_chain_consume(&qp->sq.pbl);
3237 qedr_inc_sw_cons(&qp->sq);
3243 static int qedr_poll_cq_req(struct qedr_dev *dev,
3244 struct qedr_qp *qp, struct qedr_cq *cq,
3245 int num_entries, struct ib_wc *wc,
3246 struct rdma_cqe_requester *req)
3250 switch (req->status) {
3251 case RDMA_CQE_REQ_STS_OK:
3252 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3255 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3256 if (qp->state != QED_ROCE_QP_STATE_ERR)
3258 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3259 cq->icid, qp->icid);
3260 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3261 IB_WC_WR_FLUSH_ERR, 1);
3264 /* Process all WQEs before the consumer */
3265 qp->state = QED_ROCE_QP_STATE_ERR;
3266 cnt = process_req(dev, qp, cq, num_entries, wc,
3267 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3269 /* If we have an extra WC, fill it with the actual error info */
3270 if (cnt < num_entries) {
3271 enum ib_wc_status wc_status;
3273 switch (req->status) {
3274 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3276 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3277 cq->icid, qp->icid);
3278 wc_status = IB_WC_BAD_RESP_ERR;
3280 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3282 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3283 cq->icid, qp->icid);
3284 wc_status = IB_WC_LOC_LEN_ERR;
3286 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3288 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3289 cq->icid, qp->icid);
3290 wc_status = IB_WC_LOC_QP_OP_ERR;
3292 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3294 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3295 cq->icid, qp->icid);
3296 wc_status = IB_WC_LOC_PROT_ERR;
3298 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3300 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3301 cq->icid, qp->icid);
3302 wc_status = IB_WC_MW_BIND_ERR;
3304 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3306 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3307 cq->icid, qp->icid);
3308 wc_status = IB_WC_REM_INV_REQ_ERR;
3310 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3312 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3313 cq->icid, qp->icid);
3314 wc_status = IB_WC_REM_ACCESS_ERR;
3316 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3318 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3319 cq->icid, qp->icid);
3320 wc_status = IB_WC_REM_OP_ERR;
3322 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3324 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3325 cq->icid, qp->icid);
3326 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3328 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3330 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3331 cq->icid, qp->icid);
3332 wc_status = IB_WC_RETRY_EXC_ERR;
3336 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3337 cq->icid, qp->icid);
3338 wc_status = IB_WC_GENERAL_ERR;
3340 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
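/* Translate a responder CQE into an ib_wc: map the HW status onto an IB
 * completion status and, on success, extract the length and immediate data.
 */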
3348 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3349 struct qedr_cq *cq, struct ib_wc *wc,
3350 struct rdma_cqe_responder *resp, u64 wr_id)
3352 enum ib_wc_status wc_status = IB_WC_SUCCESS;
3355 wc->opcode = IB_WC_RECV;
3358 switch (resp->status) {
3359 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3360 wc_status = IB_WC_LOC_ACCESS_ERR;
3362 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3363 wc_status = IB_WC_LOC_LEN_ERR;
3365 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3366 wc_status = IB_WC_LOC_QP_OP_ERR;
3368 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3369 wc_status = IB_WC_LOC_PROT_ERR;
3371 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3372 wc_status = IB_WC_MW_BIND_ERR;
3374 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3375 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
3377 case RDMA_CQE_RESP_STS_OK:
3378 wc_status = IB_WC_SUCCESS;
3379 wc->byte_len = le32_to_cpu(resp->length);
3381 flags = resp->flags & QEDR_RESP_RDMA_IMM;
3383 if (flags == QEDR_RESP_RDMA_IMM)
3384 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3386 if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
3388 le32_to_cpu(resp->imm_data_or_inv_r_Key);
3389 wc->wc_flags |= IB_WC_WITH_IMM;
3393 wc->status = IB_WC_GENERAL_ERR;
3394 DP_ERR(dev, "Invalid CQE status detected\n");
3398 wc->status = wc_status;
3400 wc->src_qp = qp->id;
3405 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3406 struct qedr_cq *cq, struct ib_wc *wc,
3407 struct rdma_cqe_responder *resp)
3409 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3411 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3413 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3414 qed_chain_consume(&qp->rq.pbl);
3415 qedr_inc_sw_cons(&qp->rq);
3420 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3421 int num_entries, struct ib_wc *wc, u16 hw_cons)
3425 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3427 wc->status = IB_WC_WR_FLUSH_ERR;
3430 wc->src_qp = qp->id;
3432 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3437 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3438 qed_chain_consume(&qp->rq.pbl);
3439 qedr_inc_sw_cons(&qp->rq);
3445 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3446 struct rdma_cqe_responder *resp, int *update)
3448 if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3454 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3455 struct qedr_cq *cq, int num_entries,
3456 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3461 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3462 cnt = process_resp_flush(qp, cq, num_entries, wc,
3464 try_consume_resp_cqe(cq, qp, resp, update);
3466 cnt = process_resp_one(dev, qp, cq, wc, resp);
3474 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3475 struct rdma_cqe_requester *req, int *update)
3477 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
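/* Poll the CQ: walk the valid CQEs, dispatch each by type (requester or
 * responder), advance the SW consumer and, if anything was consumed, ring
 * the CQ doorbell with the new consumer index.
 */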
3483 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3485 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3486 struct qedr_cq *cq = get_qedr_cq(ibcq);
3487 union rdma_cqe *cqe = cq->latest_cqe;
3488 u32 old_cons, new_cons;
3489 unsigned long flags;
3493 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3494 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3496 spin_lock_irqsave(&cq->cq_lock, flags);
3497 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3498 while (num_entries && is_valid_cqe(cq, cqe)) {
3502 /* prevent speculative reads of any field of CQE */
3505 qp = cqe_get_qp(cqe);
3507 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3513 switch (cqe_get_type(cqe)) {
3514 case RDMA_CQE_TYPE_REQUESTER:
3515 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3517 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3519 case RDMA_CQE_TYPE_RESPONDER_RQ:
3520 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3521 &cqe->resp, &update);
3523 case RDMA_CQE_TYPE_INVALID:
3525 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3534 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3536 cq->cq_cons += new_cons - old_cons;
3539 /* The doorbell notifies about the latest VALID entry,
3540 * but the chain already points to the next INVALID one
3542 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3544 spin_unlock_irqrestore(&cq->cq_lock, flags);
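/* qedr does not implement MAD processing; the MAD header is logged for
 * debugging and success is reported to the MAD layer.
 */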
3548 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3550 const struct ib_wc *in_wc,
3551 const struct ib_grh *in_grh,
3552 const struct ib_mad_hdr *mad_hdr,
3553 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3554 size_t *out_mad_size, u16 *out_mad_pkey_index)
3556 struct qedr_dev *dev = get_qedr_dev(ibdev);
3558 DP_DEBUG(dev, QEDR_MSG_GSI,
3559 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3560 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3561 mad_hdr->class_specific, mad_hdr->class_version,
3562 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3563 return IB_MAD_RESULT_SUCCESS;
3566 int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
3567 struct ib_port_immutable *immutable)
3569 struct ib_port_attr attr;
3572 err = qedr_query_port(ibdev, port_num, &attr);
3576 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3577 immutable->gid_tbl_len = attr.gid_tbl_len;
3578 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
3579 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3580 immutable->max_mad_size = IB_MGMT_MAD_SIZE;