xprtrdma: Use ib_device pointer safely
author	Chuck Lever <chuck.lever@oracle.com>
Tue, 26 May 2015 15:51:56 +0000 (11:51 -0400)
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>
Fri, 12 Jun 2015 17:10:36 +0000 (13:10 -0400)
The connect worker can replace ri_id, but prevents ri_id->device
from changing during the lifetime of a transport instance. The old
ID is kept around until a new ID is created and the ->device is
confirmed to be the same.

Cache a copy of ri_id->device in rpcrdma_ia and in rpcrdma_rep.
The cached copy can be used safely in code that does not serialize
with the connect worker.

Other code can use it to save an extra address generation (one
pointer dereference instead of two).
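
The pattern relied on here can be sketched outside the kernel: cache the
device pointer once when the ID is first created, and have the reconnect
path refuse any replacement ID whose device differs, so readers of the
cached pointer never need to serialize with the connect worker and reach
the device in a single dereference. The sketch below is a minimal
user-space illustration under made-up names (toy_device, toy_cm_id,
toy_ia, ia_open, ia_reconnect); it is not the xprtrdma code, only the
invariant the patch depends on.

#include <stdio.h>

/* Stand-ins for ib_device and rdma_cm_id (hypothetical, for illustration). */
struct toy_device { const char *name; };
struct toy_cm_id  { struct toy_device *device; };

/* Mirrors the shape of rpcrdma_ia after this patch: the connect worker may
 * replace ->id, but ->device is cached once and never changes afterward. */
struct toy_ia {
	struct toy_cm_id  *id;      /* may be swapped on reconnect */
	struct toy_device *device;  /* cached copy, stable for the ia's lifetime */
};

static void ia_open(struct toy_ia *ia, struct toy_cm_id *id)
{
	ia->id = id;
	ia->device = id->device;    /* cache once, one deref for all later readers */
}

/* Accept a new ID only if it resolves to the same device; this is what
 * keeps the cached pointer valid without taking the ID lock on reads. */
static int ia_reconnect(struct toy_ia *ia, struct toy_cm_id *new_id)
{
	if (ia->device != new_id->device)
		return -1;          /* "can't reconnect on different device" */
	ia->id = new_id;
	return 0;
}

int main(void)
{
	struct toy_device dev = { .name = "toydev0" };
	struct toy_cm_id id1 = { .device = &dev };
	struct toy_cm_id id2 = { .device = &dev };
	struct toy_ia ia;

	ia_open(&ia, &id1);
	if (ia_reconnect(&ia, &id2) == 0)
		printf("reconnected on %s; cached device pointer unchanged\n",
		       ia.device->name);
	return 0;
}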

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Reviewed-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
net/sunrpc/xprtrdma/fmr_ops.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/physical_ops.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h

index 302d4ebf6fbfb2a2c15780ef966b52bb473b91e4..0a96155bb03ad4bc2689dd8cd956d66459e580ee 100644 (file)
@@ -85,7 +85,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
           int nsegs, bool writing)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-       struct ib_device *device = ia->ri_id->device;
+       struct ib_device *device = ia->ri_device;
        enum dma_data_direction direction = rpcrdma_data_dir(writing);
        struct rpcrdma_mr_seg *seg1 = seg;
        struct rpcrdma_mw *mw = seg1->rl_mw;
@@ -137,17 +137,13 @@ fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mr_seg *seg1 = seg;
-       struct ib_device *device;
        int rc, nsegs = seg->mr_nsegs;
        LIST_HEAD(l);
 
        list_add(&seg1->rl_mw->r.fmr->list, &l);
        rc = ib_unmap_fmr(&l);
-       read_lock(&ia->ri_qplock);
-       device = ia->ri_id->device;
        while (seg1->mr_nsegs--)
-               rpcrdma_unmap_one(device, seg++);
-       read_unlock(&ia->ri_qplock);
+               rpcrdma_unmap_one(ia->ri_device, seg++);
        if (rc)
                goto out_err;
        return nsegs;
index dff0481dbcf8011dbbdf3050dd4ff9e67f0a6df3..66a85faf8bd994950ad23c505b5d71fb95252a03 100644 (file)
@@ -137,7 +137,7 @@ static int
 frwr_op_init(struct rpcrdma_xprt *r_xprt)
 {
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-       struct ib_device *device = r_xprt->rx_ia.ri_id->device;
+       struct ib_device *device = r_xprt->rx_ia.ri_device;
        unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
        struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
        int i;
@@ -178,7 +178,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-       struct ib_device *device = ia->ri_id->device;
+       struct ib_device *device = ia->ri_device;
        enum dma_data_direction direction = rpcrdma_data_dir(writing);
        struct rpcrdma_mr_seg *seg1 = seg;
        struct rpcrdma_mw *mw = seg1->rl_mw;
@@ -263,7 +263,6 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct ib_send_wr invalidate_wr, *bad_wr;
        int rc, nsegs = seg->mr_nsegs;
-       struct ib_device *device;
 
        seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
 
@@ -273,10 +272,9 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
        invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
        DECR_CQCOUNT(&r_xprt->rx_ep);
 
-       read_lock(&ia->ri_qplock);
-       device = ia->ri_id->device;
        while (seg1->mr_nsegs--)
-               rpcrdma_unmap_one(device, seg++);
+               rpcrdma_unmap_one(ia->ri_device, seg++);
+       read_lock(&ia->ri_qplock);
        rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
        read_unlock(&ia->ri_qplock);
        if (rc)
@@ -304,7 +302,7 @@ static void
 frwr_op_reset(struct rpcrdma_xprt *r_xprt)
 {
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-       struct ib_device *device = r_xprt->rx_ia.ri_id->device;
+       struct ib_device *device = r_xprt->rx_ia.ri_device;
        unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
        struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
        struct rpcrdma_mw *r;
index ba518af167873dfe2e9c1f5f6723665bda2bd2e7..da149e892858af34d51e40e46c4c3d8d9e0c7b57 100644 (file)
@@ -50,8 +50,7 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-       rpcrdma_map_one(ia->ri_id->device, seg,
-                       rpcrdma_data_dir(writing));
+       rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
        seg->mr_rkey = ia->ri_bind_mem->rkey;
        seg->mr_base = seg->mr_dma;
        seg->mr_nsegs = 1;
@@ -65,10 +64,7 @@ physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-       read_lock(&ia->ri_qplock);
-       rpcrdma_unmap_one(ia->ri_id->device, seg);
-       read_unlock(&ia->ri_qplock);
-
+       rpcrdma_unmap_one(ia->ri_device, seg);
        return 1;
 }
 
index 8e0bd84c8df873169871272c646fdc0241fb3540..ddd5b362da357a1ddddedb3137aae1597ef68aaf 100644 (file)
@@ -272,7 +272,6 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
 {
        struct rpcrdma_rep *rep =
                        (struct rpcrdma_rep *)(unsigned long)wc->wr_id;
-       struct rpcrdma_ia *ia;
 
        /* WARNING: Only wr_id and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS)
@@ -285,9 +284,8 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
        dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
                __func__, rep, wc->byte_len);
 
-       ia = &rep->rr_rxprt->rx_ia;
        rep->rr_len = wc->byte_len;
-       ib_dma_sync_single_for_cpu(ia->ri_id->device,
+       ib_dma_sync_single_for_cpu(rep->rr_device,
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rep->rr_len, DMA_FROM_DEVICE);
        prefetch(rdmab_to_msg(rep->rr_rdmabuf));
@@ -483,7 +481,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 
                pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
                        sap, rpc_get_port(sap),
-                       ia->ri_id->device->name,
+                       ia->ri_device->name,
                        ia->ri_ops->ro_displayname,
                        xprt->rx_buf.rb_max_requests,
                        ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
@@ -584,8 +582,9 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                rc = PTR_ERR(ia->ri_id);
                goto out1;
        }
+       ia->ri_device = ia->ri_id->device;
 
-       ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
+       ia->ri_pd = ib_alloc_pd(ia->ri_device);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
@@ -593,7 +592,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                goto out2;
        }
 
-       rc = ib_query_device(ia->ri_id->device, devattr);
+       rc = ib_query_device(ia->ri_device, devattr);
        if (rc) {
                dprintk("RPC:       %s: ib_query_device failed %d\n",
                        __func__, rc);
@@ -602,7 +601,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
 
        if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
                ia->ri_have_dma_lkey = 1;
-               ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
+               ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
        }
 
        if (memreg == RPCRDMA_FRMR) {
@@ -617,7 +616,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
                }
        }
        if (memreg == RPCRDMA_MTHCAFMR) {
-               if (!ia->ri_id->device->alloc_fmr) {
+               if (!ia->ri_device->alloc_fmr) {
                        dprintk("RPC:       %s: MTHCAFMR registration "
                                "not supported by HCA\n", __func__);
                        memreg = RPCRDMA_ALLPHYSICAL;
@@ -767,9 +766,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
-       sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
-                                 rpcrdma_cq_async_error_upcall, ep,
-                                 ep->rep_attr.cap.max_send_wr + 1, 0);
+       sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
+                             rpcrdma_cq_async_error_upcall, ep,
+                             ep->rep_attr.cap.max_send_wr + 1, 0);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                dprintk("RPC:       %s: failed to create send CQ: %i\n",
@@ -784,9 +783,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
                goto out2;
        }
 
-       recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
-                                 rpcrdma_cq_async_error_upcall, ep,
-                                 ep->rep_attr.cap.max_recv_wr + 1, 0);
+       recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
+                             rpcrdma_cq_async_error_upcall, ep,
+                             ep->rep_attr.cap.max_recv_wr + 1, 0);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                dprintk("RPC:       %s: failed to create recv CQ: %i\n",
@@ -907,7 +906,7 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
                 * More stuff I haven't thought of!
                 * Rrrgh!
                 */
-               if (ia->ri_id->device != id->device) {
+               if (ia->ri_device != id->device) {
                        printk("RPC:       %s: can't reconnect on "
                                "different device!\n", __func__);
                        rdma_destroy_id(id);
@@ -1049,6 +1048,7 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
                goto out_free;
        }
 
+       rep->rr_device = ia->ri_device;
        rep->rr_rxprt = r_xprt;
        return rep;
 
@@ -1449,9 +1449,9 @@ rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
        /*
         * All memory passed here was kmalloc'ed, therefore phys-contiguous.
         */
-       iov->addr = ib_dma_map_single(ia->ri_id->device,
+       iov->addr = ib_dma_map_single(ia->ri_device,
                        va, len, DMA_BIDIRECTIONAL);
-       if (ib_dma_mapping_error(ia->ri_id->device, iov->addr))
+       if (ib_dma_mapping_error(ia->ri_device, iov->addr))
                return -ENOMEM;
 
        iov->length = len;
@@ -1495,8 +1495,8 @@ rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
 {
        int rc;
 
-       ib_dma_unmap_single(ia->ri_id->device,
-                       iov->addr, iov->length, DMA_BIDIRECTIONAL);
+       ib_dma_unmap_single(ia->ri_device,
+                           iov->addr, iov->length, DMA_BIDIRECTIONAL);
 
        if (NULL == mr)
                return 0;
@@ -1589,15 +1589,18 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
        send_wr.num_sge = req->rl_niovs;
        send_wr.opcode = IB_WR_SEND;
        if (send_wr.num_sge == 4)       /* no need to sync any pad (constant) */
-               ib_dma_sync_single_for_device(ia->ri_id->device,
-                       req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
-                       DMA_TO_DEVICE);
-       ib_dma_sync_single_for_device(ia->ri_id->device,
-               req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
-               DMA_TO_DEVICE);
-       ib_dma_sync_single_for_device(ia->ri_id->device,
-               req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
-               DMA_TO_DEVICE);
+               ib_dma_sync_single_for_device(ia->ri_device,
+                                             req->rl_send_iov[3].addr,
+                                             req->rl_send_iov[3].length,
+                                             DMA_TO_DEVICE);
+       ib_dma_sync_single_for_device(ia->ri_device,
+                                     req->rl_send_iov[1].addr,
+                                     req->rl_send_iov[1].length,
+                                     DMA_TO_DEVICE);
+       ib_dma_sync_single_for_device(ia->ri_device,
+                                     req->rl_send_iov[0].addr,
+                                     req->rl_send_iov[0].length,
+                                     DMA_TO_DEVICE);
 
        if (DECR_CQCOUNT(ep) > 0)
                send_wr.send_flags = 0;
@@ -1630,7 +1633,7 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
        recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        recv_wr.num_sge = 1;
 
-       ib_dma_sync_single_for_cpu(ia->ri_id->device,
+       ib_dma_sync_single_for_cpu(ia->ri_device,
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rdmab_length(rep->rr_rdmabuf),
                                   DMA_BIDIRECTIONAL);
index 230e7fe5e31a68ad798001f789fd281e5722be0f..300423dea19c508d12e5c9d06e517e9dd5e16884 100644 (file)
@@ -62,6 +62,7 @@
 struct rpcrdma_ia {
        const struct rpcrdma_memreg_ops *ri_ops;
        rwlock_t                ri_qplock;
+       struct ib_device        *ri_device;
        struct rdma_cm_id       *ri_id;
        struct ib_pd            *ri_pd;
        struct ib_mr            *ri_bind_mem;
@@ -173,6 +174,7 @@ struct rpcrdma_buffer;
 
 struct rpcrdma_rep {
        unsigned int            rr_len;
+       struct ib_device        *rr_device;
        struct rpcrdma_xprt     *rr_rxprt;
        struct list_head        rr_list;
        struct rpcrdma_regbuf   *rr_rdmabuf;