xprtrdma: Streamline rpcrdma_post_recvs
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index de6be101abf231e67ec61090071d624358265db4..3270c8ad18195965b7d792f68a8353b4b8452f1c 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1478,11 +1478,13 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 {
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
-       struct ib_recv_wr *wr, *bad_wr;
+       struct ib_recv_wr *i, *wr, *bad_wr;
+       struct rpcrdma_rep *rep;
        int needed, count, rc;
 
        rc = 0;
        count = 0;
+
        needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
        if (ep->rep_receive_count > needed)
                goto out;
@@ -1490,39 +1492,48 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
        if (!temp)
                needed += RPCRDMA_MAX_RECV_BATCH;
 
-       count = 0;
+       /* fast path: all needed reps can be found on the free list */
        wr = NULL;
+       spin_lock(&buf->rb_lock);
        while (needed) {
-               struct rpcrdma_rep *rep;
-
-               spin_lock(&buf->rb_lock);
                rep = list_first_entry_or_null(&buf->rb_recv_bufs,
                                               struct rpcrdma_rep, rr_list);
-               if (likely(rep))
-                       list_del(&rep->rr_list);
-               spin_unlock(&buf->rb_lock);
-               if (!rep) {
-                       rep = rpcrdma_rep_create(r_xprt, temp);
-                       if (!rep)
-                               break;
-               }
+               if (!rep)
+                       break;
 
-               if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
-                       rpcrdma_recv_buffer_put(rep);
+               list_del(&rep->rr_list);
+               rep->rr_recv_wr.next = wr;
+               wr = &rep->rr_recv_wr;
+               --needed;
+       }
+       spin_unlock(&buf->rb_lock);
+
+       while (needed) {
+               rep = rpcrdma_rep_create(r_xprt, temp);
+               if (!rep)
                        break;
-               }
 
-               trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
                rep->rr_recv_wr.next = wr;
                wr = &rep->rr_recv_wr;
-               ++count;
                --needed;
        }
-       if (!count)
+       if (!wr)
                goto out;
 
+       for (i = wr; i; i = i->next) {
+               rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
+
+               if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
+                       goto release_wrs;
+
+               trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
+               ++count;
+       }
+
        rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
                          (const struct ib_recv_wr **)&bad_wr);
+out:
+       trace_xprtrdma_post_recvs(r_xprt, count, rc);
        if (rc) {
                for (wr = bad_wr; wr;) {
                        struct rpcrdma_rep *rep;
@@ -1534,6 +1545,12 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
                }
        }
        ep->rep_receive_count += count;
-out:
-       trace_xprtrdma_post_recvs(r_xprt, count, rc);
+       return;
+
+release_wrs:
+       for (i = wr; i;) {
+               rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
+               i = i->next;
+               rpcrdma_recv_buffer_put(rep);
+       }
 }
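
For reference, this is roughly how rpcrdma_post_recvs() reads once the patch above is applied, reconstructed only from the hunks shown here: lines that fall outside the hunks are elided, the void return type is inferred from the bare "return;", and the orientation comments on the slow path, the mapping pass, and the unwind are added for this sketch rather than taken from the source.

void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct ib_recv_wr *i, *wr, *bad_wr;
	struct rpcrdma_rep *rep;
	int needed, count, rc;

	rc = 0;
	count = 0;

	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (ep->rep_receive_count > needed)
		goto out;
	/* ... unchanged lines between the two hunks elided ... */
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

	/* fast path: all needed reps can be found on the free list */
	wr = NULL;
	spin_lock(&buf->rb_lock);
	while (needed) {
		rep = list_first_entry_or_null(&buf->rb_recv_bufs,
					       struct rpcrdma_rep, rr_list);
		if (!rep)
			break;

		list_del(&rep->rr_list);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
	}
	spin_unlock(&buf->rb_lock);

	/* slow path: allocate whatever the free list could not supply */
	while (needed) {
		rep = rpcrdma_rep_create(r_xprt, temp);
		if (!rep)
			break;

		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
	}
	if (!wr)
		goto out;

	/* DMA-map and trace each rep on the chain before posting */
	for (i = wr; i; i = i->next) {
		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);

		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
			goto release_wrs;

		trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
		++count;
	}

	/* post the whole chain of recv WRs with one verb call */
	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
	if (rc) {
		/* ... unchanged error-handling context elided ... */
	}
	ep->rep_receive_count += count;
	return;

release_wrs:
	/* a rep failed to DMA-map: return every rep on the chain */
	for (i = wr; i;) {
		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
		i = i->next;
		rpcrdma_recv_buffer_put(rep);
	}
}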