iw_cxgb4: reflect the original WR opcode in drain cqes
author Steve Wise <swise@opengridcomputing.com>
Tue, 19 Dec 2017 18:29:25 +0000 (10:29 -0800)
committer Jason Gunthorpe <jgg@mellanox.com>
Thu, 21 Dec 2017 23:06:06 +0000 (16:06 -0700)
The flush/drain logic was not retaining the original WR opcode in
its completion.  This can cause problems if the application uses
the completion opcode to make decisions.

Use bit 10 of the CQE header word to indicate the CQE is a special
drain completion, and save the original WR opcode in the CQE header
opcode field.
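
For illustration only, here is a minimal stand-alone sketch of the
encode/decode pattern this change relies on.  The CQE_DRAIN_* values
match the new t4.h macros below; the EX_OPCODE_* field and the sample
opcode value are made up for the example (they are not the driver's
real CQE layout), and htonl/ntohl stand in for cpu_to_be32/be32_to_cpu:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl/ntohl instead of cpu_to_be32/be32_to_cpu */

/* Drain flag: bit 10 of the CQE header word, as in the new t4.h macros. */
#define CQE_DRAIN_S     10
#define CQE_DRAIN_M     0x1
#define CQE_DRAIN_G(x)  (((x) >> CQE_DRAIN_S) & CQE_DRAIN_M)
#define CQE_DRAIN_V(x)  ((x) << CQE_DRAIN_S)

/* Hypothetical 4-bit opcode field at bit 0, for illustration only. */
#define EX_OPCODE_S     0
#define EX_OPCODE_M     0xF
#define EX_OPCODE_G(x)  (((x) >> EX_OPCODE_S) & EX_OPCODE_M)
#define EX_OPCODE_V(x)  ((x) << EX_OPCODE_S)

int main(void)
{
	/* Producer: flag the CQE as a drain completion but keep the
	 * opcode of the work request that was actually posted. */
	uint32_t header = htonl(CQE_DRAIN_V(1) | EX_OPCODE_V(0x5 /* sample opcode */));

	/* Consumer: the drain bit marks a software-generated completion,
	 * while the opcode field still reflects the original WR. */
	uint32_t h = ntohl(header);
	printf("drain=%u opcode=0x%x\n",
	       (unsigned)CQE_DRAIN_G(h), (unsigned)EX_OPCODE_G(h));
	return 0;
}

Because the drain marker no longer lives in the opcode field, poll_cq()
can key off DRAIN_CQE() and still report the opcode the application
originally posted.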

Fixes: 4fe7c2962e11 ("iw_cxgb4: refactor sq/rq drain logic")
Cc: stable@vger.kernel.org
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 7ed87622e4618a6120cb357de5f800c8049b24ca..6f2b26126c64a4503b6a3bf8b8c3991b65b65012 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,7 +395,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 
 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
 {
-       if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
+       if (DRAIN_CQE(cqe)) {
                WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
                return 0;
        }
@@ -494,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
        /*
         * Special cqe for drain WR completions...
         */
-       if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+       if (DRAIN_CQE(hw_cqe)) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
@@ -748,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
-               case C4IW_DRAIN_OPCODE:
-                       wc->opcode = IB_WC_SEND;
-                       break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 470f97a79ebb7f90e649179ab34b30ad0c089df7..65dd3726ca024db4e0fabff5c4527c676757065e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
        return IB_QPS_ERR;
 }
 
-#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
-
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 38bddd02a9437470e0f3ed98a7e55afbc8cc7384..21495f917bccbbcf140cc22ba3b07bf8fdf8660a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
        return 0;
 }
 
-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int ib_to_fw_opcode(int ib_opcode)
+{
+       int opcode;
+
+       switch (ib_opcode) {
+       case IB_WR_SEND_WITH_INV:
+               opcode = FW_RI_SEND_WITH_INV;
+               break;
+       case IB_WR_SEND:
+               opcode = FW_RI_SEND;
+               break;
+       case IB_WR_RDMA_WRITE:
+               opcode = FW_RI_RDMA_WRITE;
+               break;
+       case IB_WR_RDMA_READ:
+       case IB_WR_RDMA_READ_WITH_INV:
+               opcode = FW_RI_READ_REQ;
+               break;
+       case IB_WR_REG_MR:
+               opcode = FW_RI_FAST_REGISTER;
+               break;
+       case IB_WR_LOCAL_INV:
+               opcode = FW_RI_LOCAL_INV;
+               break;
+       default:
+               opcode = -EINVAL;
+       }
+       return opcode;
+}
+
+static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 {
        struct t4_cqe cqe = {};
        struct c4iw_cq *schp;
        unsigned long flag;
        struct t4_cq *cq;
+       int opcode;
 
        schp = to_c4iw_cq(qhp->ibqp.send_cq);
        cq = &schp->cq;
 
+       opcode = ib_to_fw_opcode(wr->opcode);
+       if (opcode < 0)
+               return opcode;
+
        cqe.u.drain_cookie = wr->wr_id;
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_OPCODE_V(opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
+                                CQE_DRAIN_V(1) |
                                 CQE_QPID_V(qhp->wq.sq.qid));
 
        spin_lock_irqsave(&schp->lock, flag);
@@ -819,6 +855,7 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
                                           schp->ibcq.cq_context);
                spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
        }
+       return 0;
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -833,9 +870,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 
        cqe.u.drain_cookie = wr->wr_id;
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
+                                CQE_DRAIN_V(1) |
                                 CQE_QPID_V(qhp->wq.sq.qid));
 
        spin_lock_irqsave(&rchp->lock, flag);
@@ -875,7 +913,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         */
        if (qhp->wq.flushed) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               complete_sq_drain_wr(qhp, wr);
+               err = complete_sq_drain_wr(qhp, wr);
                return err;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e9ea94268d51545b07400ed9cba095d8461fb2a1..79e8ee12c391cf6d800911d4b2f0e16063c6de39 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -197,6 +197,11 @@ struct t4_cqe {
 #define CQE_SWCQE_G(x)    ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
 #define CQE_SWCQE_V(x)   ((x)<<CQE_SWCQE_S)
 
+#define CQE_DRAIN_S       10
+#define CQE_DRAIN_M       0x1
+#define CQE_DRAIN_G(x)    ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x)   ((x)<<CQE_DRAIN_S)
+
 #define CQE_STATUS_S      5
 #define CQE_STATUS_M      0x1F
 #define CQE_STATUS_G(x)   ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
@@ -213,6 +218,7 @@ struct t4_cqe {
 #define CQE_OPCODE_V(x)   ((x)<<CQE_OPCODE_S)
 
 #define SW_CQE(x)         (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x)      (CQE_DRAIN_G(be32_to_cpu((x)->header)))
 #define CQE_QPID(x)       (CQE_QPID_G(be32_to_cpu((x)->header)))
 #define CQE_TYPE(x)       (CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x)       (CQE_TYPE((x)))