asedeno.scripts.mit.edu Git - linux.git/commitdiff
iw_cxgb4: only call the cq comp_handler when the cq is armed
author: Steve Wise <swise@opengridcomputing.com>
Thu, 9 Nov 2017 15:14:43 +0000 (07:14 -0800)
committer: Doug Ledford <dledford@redhat.com>
Mon, 13 Nov 2017 21:59:22 +0000 (16:59 -0500)
The ULP's completion handler should only be called if the CQ is
armed for notification.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/qp.c

index b8c7cc938bce37f19b099e97b507a3faaccffc03..a252d5c40ae3f0cdc85f3287ad677fde8a637966 100644 (file)
@@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
        if (qhp->ibqp.event_handler)
                (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
-       spin_lock_irqsave(&chp->comp_handler_lock, flag);
-       (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-       spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+       if (t4_clear_cq_armed(&chp->cq)) {
+               spin_lock_irqsave(&chp->comp_handler_lock, flag);
+               (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+               spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+       }
 }
 
 void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
index 1374b41201a9b019997f23956eaa298f53ef81c6..fefc5fed17782ed094f853b6d9b871d58afaaad1 100644 (file)
@@ -813,10 +813,12 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
        t4_swcq_produce(cq);
        spin_unlock_irqrestore(&schp->lock, flag);
 
-       spin_lock_irqsave(&schp->comp_handler_lock, flag);
-       (*schp->ibcq.comp_handler)(&schp->ibcq,
-                                  schp->ibcq.cq_context);
-       spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+       if (t4_clear_cq_armed(&schp->cq)) {
+               spin_lock_irqsave(&schp->comp_handler_lock, flag);
+               (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                          schp->ibcq.cq_context);
+               spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+       }
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -842,10 +844,12 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
        t4_swcq_produce(cq);
        spin_unlock_irqrestore(&rchp->lock, flag);
 
-       spin_lock_irqsave(&rchp->comp_handler_lock, flag);
-       (*rchp->ibcq.comp_handler)(&rchp->ibcq,
-                                  rchp->ibcq.cq_context);
-       spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+       if (t4_clear_cq_armed(&rchp->cq)) {
+               spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+               (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+                                          rchp->ibcq.cq_context);
+               spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+       }
 }
 
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,