xprtrdma: Simplify rpcrdma_rep_create

diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 84bb379245406f63026e495af1d2dbca64dc67fa..de6be101abf231e67ec61090071d624358265db4 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -89,14 +89,12 @@ static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
  */
 static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
 {
-       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
        /* Flush Receives, then wait for deferred Reply work
         * to complete.
         */
        ib_drain_rq(ia->ri_id->qp);
-       drain_workqueue(buf->rb_completion_wq);
 
        /* Deferred Reply processing might have scheduled
         * local invalidations.
@@ -901,7 +899,7 @@ struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
         * completions recently. This is a sign the Send Queue is
         * backing up. Cause the caller to pause and try again.
         */
-       set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
+       xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
        r_xprt->rx_stats.empty_sendctx_q++;
        return NULL;
 }
@@ -936,10 +934,7 @@ rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
        /* Paired with READ_ONCE */
        smp_store_release(&buf->rb_sc_tail, next_tail);
 
-       if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
-               smp_mb__after_atomic();
-               xprt_write_space(&sc->sc_xprt->rx_xprt);
-       }
+       xprt_write_space(&sc->sc_xprt->rx_xprt);
 }
 
 static void
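
The two hunks above pair up: rpcrdma_sendctx_get_locked() now parks the transport with xprt_wait_for_buffer_space() when the sendctx queue is empty, and rpcrdma_sendctx_put_locked() wakes it unconditionally, so the RPCRDMA_BUF_F_EMPTY_SCQ flag and its memory barrier are no longer needed. A minimal sketch of the resulting protocol, with try_get_sendctx()/put_sendctx() as hypothetical stand-ins for the ring-buffer bookkeeping:

	static struct rpcrdma_sendctx *sendctx_get(struct rpcrdma_xprt *r_xprt)
	{
		struct rpcrdma_sendctx *sc;

		sc = try_get_sendctx(r_xprt);	/* hypothetical helper */
		if (!sc) {
			/* queue empty: ask the RPC client to pause the caller */
			xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
			return NULL;
		}
		return sc;
	}

	static void sendctx_put(struct rpcrdma_xprt *r_xprt, struct rpcrdma_sendctx *sc)
	{
		put_sendctx(r_xprt, sc);	/* hypothetical helper */
		/* always wake: cheap, and removes the flag/barrier dance */
		xprt_write_space(&r_xprt->rx_xprt);
	}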
@@ -977,8 +972,6 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
        r_xprt->rx_stats.mrs_allocated += count;
        spin_unlock(&buf->rb_mrlock);
        trace_xprtrdma_createmrs(r_xprt, count);
-
-       xprt_write_space(&r_xprt->rx_xprt);
 }
 
 static void
@@ -990,6 +983,7 @@ rpcrdma_mr_refresh_worker(struct work_struct *work)
                                                   rx_buf);
 
        rpcrdma_mrs_create(r_xprt);
+       xprt_write_space(&r_xprt->rx_xprt);
 }
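
With xprt_write_space() moved out of rpcrdma_mrs_create() and into the refresh worker, only the deferred-refresh path wakes write-space waiters; the initial MR allocation no longer does. A sketch of how the worker body reads after this hunk (the container_of() setup is reconstructed here as an assumption from the fields visible in this diff):

	static void rpcrdma_mr_refresh_worker(struct work_struct *work)
	{
		struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
							  rb_refresh_worker.work);
		struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
							   rx_buf);

		rpcrdma_mrs_create(r_xprt);		/* replenish the MR free list */
		xprt_write_space(&r_xprt->rx_xprt);	/* then wake waiting RPCs */
	}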
 
 /**
@@ -1042,9 +1036,9 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
        return NULL;
 }
 
-static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
+static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+                                             bool temp)
 {
-       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_rep *rep;
 
        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
@@ -1055,27 +1049,22 @@ static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
                                               DMA_FROM_DEVICE, GFP_KERNEL);
        if (!rep->rr_rdmabuf)
                goto out_free;
+
        xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
                     rdmab_length(rep->rr_rdmabuf));
-
        rep->rr_cqe.done = rpcrdma_wc_receive;
        rep->rr_rxprt = r_xprt;
-       INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
        rep->rr_recv_wr.next = NULL;
        rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
        rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        rep->rr_recv_wr.num_sge = 1;
        rep->rr_temp = temp;
-
-       spin_lock(&buf->rb_lock);
-       list_add(&rep->rr_list, &buf->rb_recv_bufs);
-       spin_unlock(&buf->rb_lock);
-       return true;
+       return rep;
 
 out_free:
        kfree(rep);
 out:
-       return false;
+       return NULL;
 }
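
rpcrdma_rep_create() now hands the new rep straight back to its caller instead of returning a bool and parking the rep on buf->rb_recv_bufs under rb_lock; the rep stays private to the caller until it is posted or released. A sketch of the new calling convention, mirroring the rpcrdma_post_recvs() hunk further down (the chaining helper is illustrative only):

	rep = rpcrdma_rep_create(r_xprt, temp);
	if (!rep)
		break;			/* allocation failed: post what we already have */
	/* rep is owned by this caller; no list insertion, no extra locking */
	chain_recv_wr(&wr, rep);	/* hypothetical: link rep->rr_recv_wr onto the post chain */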
 
 /**
@@ -1089,7 +1078,6 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        int i, rc;
 
-       buf->rb_flags = 0;
        buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
        buf->rb_bc_srv_max_requests = 0;
        spin_lock_init(&buf->rb_mrlock);
@@ -1122,15 +1110,6 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
        if (rc)
                goto out;
 
-       buf->rb_completion_wq = alloc_workqueue("rpcrdma-%s",
-                                               WQ_MEM_RECLAIM | WQ_HIGHPRI,
-                                               0,
-                       r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
-       if (!buf->rb_completion_wq) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
        return 0;
 out:
        rpcrdma_buffer_destroy(buf);
@@ -1204,11 +1183,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
 {
        cancel_delayed_work_sync(&buf->rb_refresh_worker);
 
-       if (buf->rb_completion_wq) {
-               destroy_workqueue(buf->rb_completion_wq);
-               buf->rb_completion_wq = NULL;
-       }
-
        rpcrdma_sendctxs_destroy(buf);
 
        while (!list_empty(&buf->rb_recv_bufs)) {
@@ -1484,8 +1458,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
        struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
        int rc;
 
-       if (!ep->rep_send_count ||
-           test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
+       if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
                send_wr->send_flags |= IB_SEND_SIGNALED;
                ep->rep_send_count = ep->rep_send_batch;
        } else {
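
The per-request RPCRDMA_REQ_F_TX_RESOURCES flag is gone; an elevated rl_kref count now stands in for it, and kref_read() is a plain read of the refcount, so the test stays cheap. A minimal illustration of the test (generic names, not the xprtrdma structures):

	#include <linux/kref.h>

	/* Ask for a signaled Send completion if anyone besides the sender
	 * still holds a reference to the request.
	 */
	static bool need_signaled_send(const struct kref *ref)
	{
		return kref_read(ref) > 1;
	}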
@@ -1520,7 +1493,6 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
        count = 0;
        wr = NULL;
        while (needed) {
-               struct rpcrdma_regbuf *rb;
                struct rpcrdma_rep *rep;
 
                spin_lock(&buf->rb_lock);
@@ -1530,13 +1502,12 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
                        list_del(&rep->rr_list);
                spin_unlock(&buf->rb_lock);
                if (!rep) {
-                       if (!rpcrdma_rep_create(r_xprt, temp))
+                       rep = rpcrdma_rep_create(r_xprt, temp);
+                       if (!rep)
                                break;
-                       continue;
                }
 
-               rb = rep->rr_rdmabuf;
-               if (!rpcrdma_regbuf_dma_map(r_xprt, rb)) {
+               if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
                        rpcrdma_recv_buffer_put(rep);
                        break;
                }
@@ -1553,10 +1524,11 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
        rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
                          (const struct ib_recv_wr **)&bad_wr);
        if (rc) {
-               for (wr = bad_wr; wr; wr = wr->next) {
+               for (wr = bad_wr; wr;) {
                        struct rpcrdma_rep *rep;
 
                        rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
+                       wr = wr->next;
                        rpcrdma_recv_buffer_put(rep);
                        --count;
                }
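
In the ib_post_recv() error path, the loop now loads wr->next before calling rpcrdma_recv_buffer_put(rep), because putting the rep can free or recycle the rep that embeds the work request; advancing the cursor afterwards would touch memory it no longer owns. The same save-next-then-release pattern in isolation:

	struct node {
		struct node *next;
	};

	static void release_chain(struct node *head, void (*put)(struct node *))
	{
		while (head) {
			struct node *next = head->next;	/* read before put() */

			put(head);			/* may free 'head' */
			head = next;
		}
	}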