iw_cxgb4: atomically flush the qp
author    Steve Wise <swise@opengridcomputing.com>
          Thu, 9 Nov 2017 15:21:26 +0000 (07:21 -0800)
committer Doug Ledford <dledford@redhat.com>
          Mon, 13 Nov 2017 21:59:22 +0000 (16:59 -0500)
__flush_qp() has a race condition: during the flush operation, the
qp lock is released, allowing another thread to post a WR, which
corrupts the queue state and can cause crashes.  The lock was
released to preserve the cq/qp locking hierarchy of cq first, then qp.
However, releasing the qp lock is not necessary; both the RQ and SQ
CQ locks can be acquired first, followed by the qp lock, and then the
RQ and SQ flushing can be done without unlocking.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
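
As a sketch, the new lock ordering looks like this (illustrative only:
the helper name flush_qp_locked is hypothetical and the flush calls are
elided; the real logic lives in __flush_qp() in the diff below):

static void flush_qp_locked(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
			    struct c4iw_cq *schp)
{
	unsigned long flag;

	/*
	 * Take both CQ locks first, then the qp lock.  When the send
	 * and recv queues share a CQ (schp == rchp), the second
	 * acquire is skipped to avoid self-deadlock on the same lock.
	 */
	spin_lock_irqsave(&rchp->lock, flag);
	if (schp != rchp)
		spin_lock(&schp->lock);
	spin_lock(&qhp->lock);

	/*
	 * ... flush the RQ and SQ here; the qp lock is never dropped,
	 * so no other thread can post a WR mid-flush ...
	 */

	spin_unlock(&qhp->lock);
	if (schp != rchp)
		spin_unlock(&schp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
}

The recv CQ lock is always taken before the send CQ lock, and the
unlocks mirror the acquires in reverse order, matching the patch below.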
drivers/infiniband/hw/cxgb4/qp.c

index fefc5fed17782ed094f853b6d9b871d58afaaad1..5ee7fe433136bc22dc7a55ce9bad4930ee49ae1d 100644 (file)
@@ -1255,31 +1255,34 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 
        pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
 
-       /* locking hierarchy: cq lock first, then qp lock. */
+       /* locking hierarchy: cqs lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, flag);
+       if (schp != rchp)
+               spin_lock(&schp->lock);
        spin_lock(&qhp->lock);
 
        if (qhp->wq.flushed) {
                spin_unlock(&qhp->lock);
+               if (schp != rchp)
+                       spin_unlock(&schp->lock);
                spin_unlock_irqrestore(&rchp->lock, flag);
                return;
        }
        qhp->wq.flushed = 1;
+       t4_set_wq_in_error(&qhp->wq);
 
        c4iw_flush_hw_cq(rchp);
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
-       spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&rchp->lock, flag);
 
-       /* locking hierarchy: cq lock first, then qp lock. */
-       spin_lock_irqsave(&schp->lock, flag);
-       spin_lock(&qhp->lock);
        if (schp != rchp)
                c4iw_flush_hw_cq(schp);
        sq_flushed = c4iw_flush_sq(qhp);
+
        spin_unlock(&qhp->lock);
-       spin_unlock_irqrestore(&schp->lock, flag);
+       if (schp != rchp)
+               spin_unlock(&schp->lock);
+       spin_unlock_irqrestore(&rchp->lock, flag);
 
        if (schp == rchp) {
                if (t4_clear_cq_armed(&rchp->cq) &&
@@ -1313,8 +1316,8 @@ static void flush_qp(struct c4iw_qp *qhp)
        rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
        schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
-       t4_set_wq_in_error(&qhp->wq);
        if (qhp->ibqp.uobject) {
+               t4_set_wq_in_error(&qhp->wq);
                t4_set_cq_in_error(&rchp->cq);
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);