xprtrdma: Refactor rpcrdma_deferred_completion
author    Chuck Lever <chuck.lever@oracle.com>
          Fri, 20 Oct 2017 14:48:28 +0000 (10:48 -0400)
committer Anna Schumaker <Anna.Schumaker@Netapp.com>
          Fri, 17 Nov 2017 18:47:57 +0000 (13:47 -0500)
Invoke a common routine for releasing hardware resources (for
example, invalidating MRs). This needs to be done whether an
RPC Reply has arrived or the RPC was terminated early.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
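
The heart of the patch is the new RPCRDMA_REQ_F_PENDING flag: xprt_rdma_send_request() sets it before posting the Send, rpcrdma_reply_handler() clears it when a matching Reply arrives, and xprt_rdma_free() tests it so that an RPC terminated before its Reply still releases its hardware resources through the same rpcrdma_release_rqst() routine. The sketch below is a minimal user-space model of that handshake, not the kernel code: C11 atomics stand in for the kernel's set_bit()/clear_bit()/test_bit() (which take bit indices rather than masks), and a plain boolean stubs the rl_registered MR bookkeeping.

/* Minimal user-space model of the F_PENDING handshake (illustration
 * only; stub types and C11 atomics stand in for the kernel's bitops).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum {
	REQ_F_BACKCHANNEL = 1u << 0,	/* models RPCRDMA_REQ_F_BACKCHANNEL */
	REQ_F_PENDING     = 1u << 1,	/* models RPCRDMA_REQ_F_PENDING */
};

struct req {
	atomic_uint flags;	/* models req->rl_flags */
	bool registered;	/* models !list_empty(&req->rl_registered) */
};

/* models rpcrdma_release_rqst(): the common release routine */
static void release_rqst(struct req *req)
{
	if (req->registered) {
		req->registered = false;
		puts("ro_unmap_sync: MRs invalidated and unmapped");
	}
}

/* models xprt_rdma_send_request(): mark the request pending
 * before posting the Send */
static void send_request(struct req *req)
{
	atomic_fetch_or(&req->flags, REQ_F_PENDING);
	req->registered = true;	/* payload MRs are now registered */
}

/* models the Reply path: rpcrdma_reply_handler() clears the flag,
 * then rpcrdma_deferred_completion() releases the resources */
static void reply_handler(struct req *req)
{
	atomic_fetch_and(&req->flags, ~REQ_F_PENDING);
	release_rqst(req);
}

/* models xprt_rdma_free(): release only if no Reply consumed the flag */
static void rdma_free(struct req *req)
{
	if (atomic_load(&req->flags) & REQ_F_PENDING)
		release_rqst(req);
}

int main(void)
{
	struct req a = { 0 }, b = { 0 };

	send_request(&a);	/* normal path: Reply arrives first */
	reply_handler(&a);
	rdma_free(&a);		/* flag already clear: no double release */

	send_request(&b);	/* early termination: no Reply ever matches */
	rdma_free(&b);		/* flag still set: resources released here */
	return 0;
}

In this model, release_rqst() runs once per request on either path, which is the point of the refactor: a single routine releases hardware resources whether the Reply arrived or the RPC was terminated early.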
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/xprt_rdma.h

diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 9951c81b82ed586ed0a9236099131fe02ffce542..853dede38900aaaced5dc5ab83cb59197ced5e72 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1293,6 +1293,20 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
        goto out;
 }
 
+void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+{
+       /* Invalidate and unmap the data payloads before waking
+        * the waiting application. This guarantees the memory
+        * regions are properly fenced from the server before the
+        * application accesses the data. It also ensures proper
+        * send flow control: waking the next RPC waits until this
+        * RPC has relinquished all its Send Queue entries.
+        */
+       if (!list_empty(&req->rl_registered))
+               r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
+                                                   &req->rl_registered);
+}
+
 /* Reply handling runs in the poll worker thread. Anything that
  * might wait is deferred to a separate workqueue.
  */
@@ -1301,18 +1315,9 @@ void rpcrdma_deferred_completion(struct work_struct *work)
        struct rpcrdma_rep *rep =
                        container_of(work, struct rpcrdma_rep, rr_work);
        struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
-       struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
 
-       /* Invalidate and unmap the data payloads before waking
-        * the waiting application. This guarantees the memory
-        * regions are properly fenced from the server before the
-        * application accesses the data. It also ensures proper
-        * send flow control: waking the next RPC waits until this
-        * RPC has relinquished all its Send Queue entries.
-        */
        rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
-       r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered);
-
+       rpcrdma_release_rqst(rep->rr_rxprt, req);
        rpcrdma_complete_rqst(rep);
 }
 
@@ -1374,6 +1379,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
        req = rpcr_to_rdmar(rqst);
        req->rl_reply = rep;
        rep->rr_rqst = rqst;
+       clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
 
        dprintk("RPC:       %s: reply %p completes request %p (xid 0x%08x)\n",
                __func__, rep, req, be32_to_cpu(rep->rr_xid));
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index acdb2e9c72c8fe22167d58b859d9a65f5de4b83b..35aefe201848c70cdb800344afa8d5a7f52d6e8f 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -678,15 +678,14 @@ xprt_rdma_free(struct rpc_task *task)
        struct rpc_rqst *rqst = task->tk_rqstp;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-       struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
        if (test_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags))
                return;
 
        dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);
 
-       if (!list_empty(&req->rl_registered))
-               ia->ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered);
+       if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
+               rpcrdma_release_rqst(r_xprt, req);
        rpcrdma_buffer_put(req);
 }
 
@@ -742,6 +741,7 @@ xprt_rdma_send_request(struct rpc_task *task)
                goto drop_connection;
        req->rl_connect_cookie = xprt->connect_cookie;
 
+       set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
        if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
                goto drop_connection;
 
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 417532069842f84b94f40d608ab05732cc07e597..c260475baa367e91bd1b8c35cf2f350b895e14d6 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -386,6 +386,7 @@ struct rpcrdma_req {
 /* rl_flags */
 enum {
        RPCRDMA_REQ_F_BACKCHANNEL = 0,
+       RPCRDMA_REQ_F_PENDING,
 };
 
 static inline void
@@ -655,6 +656,8 @@ int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
 void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
+void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt,
+                         struct rpcrdma_req *req);
 void rpcrdma_deferred_completion(struct work_struct *work);
 
 static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)