xprtrdma: Change return value of rpcrdma_prepare_send_sges()
author    Chuck Lever <chuck.lever@oracle.com>
          Fri, 20 Oct 2017 14:47:55 +0000 (10:47 -0400)
committer Anna Schumaker <Anna.Schumaker@Netapp.com>
          Fri, 17 Nov 2017 18:47:56 +0000 (13:47 -0500)
Clean up: Make rpcrdma_prepare_send_sges() return a negative errno
instead of a bool. Soon callers will want distinct treatments of
different types of failures.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
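The snippet below is a minimal, self-contained sketch of the pattern this
change enables (plain user-space C, not kernel code; the function name, the
simulated errnos, and the retry policy are illustrative assumptions, not part
of this patch). With a negative errno return, a caller can branch on the
specific failure instead of collapsing every error into -EIO as a bool
return forces.

	#include <errno.h>
	#include <stdio.h>

	/* Illustrative stand-in for a "prepare" function that reports
	 * why it failed instead of only that it failed.
	 */
	static int prepare_send_sges(int simulate_failure)
	{
		if (simulate_failure == 1)
			return -EIO;		/* e.g. DMA mapping failed */
		if (simulate_failure == 2)
			return -ENOBUFS;	/* e.g. resources exhausted */
		return 0;
	}

	int main(void)
	{
		int rc = prepare_send_sges(2);

		switch (rc) {
		case 0:
			printf("send prepared\n");
			break;
		case -ENOBUFS:
			/* transient shortage: a caller could defer and retry */
			printf("out of resources, retry later\n");
			break;
		default:
			/* hard error: fail the request */
			printf("hard error %d\n", rc);
			break;
		}
		return 0;
	}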
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/xprt_rdma.h

diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index d31d0ac5ada9a6a08fe6760a3b5e2cb4eaa552e9..f0d5998330fe01ab31cf6020fb64526931f95c64 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -222,8 +222,8 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
        *p++ = xdr_zero;
        *p = xdr_zero;
 
-       if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
-                                      &rqst->rq_snd_buf, rpcrdma_noch))
+       if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
+                                     &rqst->rq_snd_buf, rpcrdma_noch))
                return -EIO;
        return 0;
 }
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 4f6c5395d198d484689340cccf29b1d339ae15a6..e3ece9843f9dcb7ab46aef8f6dd483ae611570ed 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -544,7 +544,7 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 
        if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
                if (!__rpcrdma_dma_map_regbuf(ia, rb))
-                       return false;
+                       goto out_regbuf;
                sge->addr = rdmab_addr(rb);
                sge->lkey = rdmab_lkey(rb);
        }
@@ -554,6 +554,10 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
                                      sge->length, DMA_TO_DEVICE);
        req->rl_send_wr.num_sge++;
        return true;
+
+out_regbuf:
+       pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+       return false;
 }
 
 /* Prepare the Send SGEs. The head and tail iovec, and each entry
@@ -574,7 +578,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
         * DMA-mapped. Sync the content that has changed.
         */
        if (!rpcrdma_dma_map_regbuf(ia, rb))
-               return false;
+               goto out_regbuf;
        sge_no = 1;
        sge[sge_no].addr = rdmab_addr(rb);
        sge[sge_no].length = xdr->head[0].iov_len;
@@ -662,6 +666,10 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
        req->rl_send_wr.num_sge += sge_no;
        return true;
 
+out_regbuf:
+       pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+       return false;
+
 out_mapping_overflow:
        rpcrdma_unmap_sges(ia, req);
        pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
@@ -673,26 +681,32 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
        return false;
 }
 
-bool
-rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
-                         u32 hdrlen, struct xdr_buf *xdr,
-                         enum rpcrdma_chunktype rtype)
+/**
+ * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
+ * @r_xprt: controlling transport
+ * @req: context of RPC Call being marshalled
+ * @hdrlen: size of transport header, in bytes
+ * @xdr: xdr_buf containing RPC Call
+ * @rtype: chunk type being encoded
+ *
+ * Returns 0 on success; otherwise a negative errno is returned.
+ */
+int
+rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
+                         struct rpcrdma_req *req, u32 hdrlen,
+                         struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
 {
        req->rl_send_wr.num_sge = 0;
        req->rl_mapped_sges = 0;
 
-       if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
-               goto out_map;
+       if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
+               return -EIO;
 
        if (rtype != rpcrdma_areadch)
-               if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
-                       goto out_map;
-
-       return true;
+               if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
+                       return -EIO;
 
-out_map:
-       pr_err("rpcrdma: failed to DMA map a Send buffer\n");
-       return false;
+       return 0;
 }
 
 /**
@@ -843,12 +857,10 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
                transfertypes[rtype], transfertypes[wtype],
                xdr_stream_pos(xdr));
 
-       if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req,
-                                      xdr_stream_pos(xdr),
-                                      &rqst->rq_snd_buf, rtype)) {
-               ret = -EIO;
+       ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
+                                       &rqst->rq_snd_buf, rtype);
+       if (ret)
                goto out_err;
-       }
        return 0;
 
 out_err:
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0e0ae6195a5bd493413c88699ade78481acc88ef..0b8ca5e5c706815f069fad67b6964a8f5d248a8a 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -613,8 +613,10 @@ enum rpcrdma_chunktype {
        rpcrdma_replych
 };
 
-bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
-                              u32, struct xdr_buf *, enum rpcrdma_chunktype);
+int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
+                             struct rpcrdma_req *req, u32 hdrlen,
+                             struct xdr_buf *xdr,
+                             enum rpcrdma_chunktype rtype);
 void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
 int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);