IB/mlx5: Fetch soft WQEs on fatal error state
Author:     Erez Shitrit <erezsh@mellanox.com>
AuthorDate: Mon, 21 May 2018 08:41:01 +0000 (11:41 +0300)
Commit:     Jason Gunthorpe <jgg@mellanox.com>
CommitDate: Thu, 24 May 2018 15:39:25 +0000 (09:39 -0600)
On fatal error, the driver simulates CQEs for ULPs that rely on
getting a completion for every posted work request.

For GSI traffic, mlx5 has its own mechanism that delivers the
completions via software CQEs directly to the relevant CQ.
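
That mechanism queues a ready-made work completion on the CQ's
wc_list, and the poll path later hands it back to the consumer.  A
minimal sketch of the producer side, with the mlx5_ib_wc layout
inferred from poll_soft_wc() in the diff below (queue_soft_wc() is a
hypothetical name; error paths and the completion-handler kick are
omitted):

/* Sketch only: layout inferred from poll_soft_wc() below. */
struct mlx5_ib_wc {
	struct ib_wc	 wc;
	struct list_head list;
};

static int queue_soft_wc(struct mlx5_ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;	/* completion prepared by the caller */
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	spin_unlock_irqrestore(&cq->lock, flags);
	return 0;
}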

This must keep working on fatal error too, so the driver simulates
such CQEs with the appropriate error status in order to complete the
GSI QP's work requests.
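
From the ULP's point of view, polling the CQ after a fatal error now
returns those simulated completions flushed with an error status.  A
hypothetical consumer-side drain loop (not part of this patch)
illustrating what the ULP observes:

/* Hypothetical ULP-side drain loop; not from this patch. */
static void drain_cq_on_fatal(struct ib_cq *cq)
{
	struct ib_wc wc;

	/* Every posted WR still generates a completion; GSI soft
	 * CQEs are now flushed like the hardware ones.
	 */
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status == IB_WC_WR_FLUSH_ERR)
			continue;	/* expected during teardown */
		/* handle completions that finished before the error */
	}
}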

Without the fix, the following deadlock might appear:
        schedule_timeout+0x274/0x350
        wait_for_common+0xec/0x240
        mcast_remove_one+0xd0/0x120 [ib_core]
        ib_unregister_device+0x12c/0x230 [ib_core]
        mlx5_ib_remove+0xc4/0x270 [mlx5_ib]
        mlx5_detach_device+0x184/0x1a0 [mlx5_core]
        mlx5_unload_one+0x308/0x340 [mlx5_core]
        mlx5_pci_err_detected+0x74/0xe0 [mlx5_core]

Cc: <stable@vger.kernel.org> # 4.7
Fixes: 89ea94a7b6c4 ("IB/mlx5: Reset flow support for IB kernel ULPs")
Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/mlx5/cq.c

index 77d257ec899be9b5ec23b7489850161a14d52d1c..9f6bc34cd4db97a9c83ad63d8816570251a2eeb8 100644
@@ -637,7 +637,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 }
 
 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
-                       struct ib_wc *wc)
+                       struct ib_wc *wc, bool is_fatal_err)
 {
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_ib_wc *soft_wc, *next;
@@ -650,6 +650,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
                mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
                            cq->mcq.cqn);
 
+               if (unlikely(is_fatal_err)) {
+                       soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
+                       soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+               }
                wc[npolled++] = soft_wc->wc;
                list_del(&soft_wc->list);
                kfree(soft_wc);
@@ -670,12 +674,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 
        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-               mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+               /* make sure no soft wqe's are waiting */
+               if (unlikely(!list_empty(&cq->wc_list)))
+                       soft_polled = poll_soft_wc(cq, num_entries, wc, true);
+
+               mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
+                                    wc + soft_polled, &npolled);
                goto out;
        }
 
        if (unlikely(!list_empty(&cq->wc_list)))
-               soft_polled = poll_soft_wc(cq, num_entries, wc);
+               soft_polled = poll_soft_wc(cq, num_entries, wc, false);
 
        for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
                if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
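
In the fatal-error path above, the soft WQEs are drained first so the
flushed GSI completions occupy the leading entries of the caller's wc
array; mlx5_ib_poll_sw_comp() then gets only the remaining capacity
(num_entries - soft_polled) starting at wc + soft_polled, mirroring
the bookkeeping the normal path already does for mlx5_poll_one().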