diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b481490ad25756f6de36cd718c0983be751c8e5c..9909022dc6c3eebd4c52ef3e7dca5221dc8b6038 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -340,8 +340,6 @@ static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
                return;
 
        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-               if (d->frpl)
-                       ib_free_fast_reg_page_list(d->frpl);
                if (d->mr)
                        ib_dereg_mr(d->mr);
        }
@@ -362,7 +360,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        struct srp_fr_pool *pool;
        struct srp_fr_desc *d;
        struct ib_mr *mr;
-       struct ib_fast_reg_page_list *frpl;
        int i, ret = -EINVAL;
 
        if (pool_size <= 0)
@@ -385,12 +382,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
                        goto destroy_pool;
                }
                d->mr = mr;
-               frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
-               if (IS_ERR(frpl)) {
-                       ret = PTR_ERR(frpl);
-                       goto destroy_pool;
-               }
-               d->frpl = frpl;
                list_add_tail(&d->entry, &pool->free_list);
        }
 
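With the new registration API there is no driver-owned page list to allocate:
the MR created earlier in srp_create_fr_pool() (outside this hunk, via
ib_alloc_mr()) carries its own internal page list. A minimal sketch of the
per-descriptor setup that remains, using the identifiers above and assuming
pd is the protection domain passed to srp_create_fr_pool():

        struct ib_mr *mr;

        /* An IB_MR_TYPE_MEM_REG MR embeds a page list sized for up to
         * max_page_list_len pages, so no ib_fast_reg_page_list is needed.
         */
        mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_page_list_len);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto destroy_pool;
        }
        d->mr = mr;
        list_add_tail(&d->entry, &pool->free_list);
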
@@ -849,11 +840,12 @@ static void srp_free_req_data(struct srp_target_port *target,
 
        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
-               if (dev->use_fast_reg)
+               if (dev->use_fast_reg) {
                        kfree(req->fr_list);
-               else
+               } else {
                        kfree(req->fmr_list);
-               kfree(req->map_page);
+                       kfree(req->map_page);
+               }
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
@@ -887,14 +879,15 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
                                  GFP_KERNEL);
                if (!mr_list)
                        goto out;
-               if (srp_dev->use_fast_reg)
+               if (srp_dev->use_fast_reg) {
                        req->fr_list = mr_list;
-               else
+               } else {
                        req->fmr_list = mr_list;
-               req->map_page = kmalloc(srp_dev->max_pages_per_mr *
-                                       sizeof(void *), GFP_KERNEL);
-               if (!req->map_page)
-                       goto out;
+                       req->map_page = kmalloc(srp_dev->max_pages_per_mr *
+                                               sizeof(void *), GFP_KERNEL);
+                       if (!req->map_page)
+                               goto out;
+               }
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->indirect_desc)
                        goto out;
@@ -1286,6 +1279,17 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
        if (state->fmr.next >= state->fmr.end)
                return -ENOMEM;
 
+       WARN_ON_ONCE(!dev->use_fmr);
+
+       if (state->npages == 0)
+               return 0;
+
+       if (state->npages == 1 && target->global_mr) {
+               srp_map_desc(state, state->base_dma_addr, state->dma_len,
+                            target->global_mr->rkey);
+               goto reset_state;
+       }
+
        fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
@@ -1297,6 +1301,10 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
        srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
                     state->dma_len, fmr->fmr->rkey);
 
+reset_state:
+       state->npages = 0;
+       state->dma_len = 0;
+
        return 0;
 }
 
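srp_map_finish_fmr() now handles its own fast paths: an empty accumulator is
a no-op, and a single page with a global MR is emitted directly under
target->global_mr->rkey, bypassing the FMR pool. For the pool path, the map
call pairs with an unmap at request completion; a minimal sketch of that
pairing (identifiers as above, error handling abbreviated):

        struct ib_pool_fmr *fmr;

        /* Map state->npages physical pages at io_addr through the pool;
         * fmr->fmr->rkey then goes into the SRP memory descriptor.
         */
        fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);
        *state->fmr.next++ = fmr;

        /* ... and on request completion the driver unmaps it again: */
        ib_fmr_pool_unmap(fmr);
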
@@ -1306,13 +1314,26 @@ static int srp_map_finish_fr(struct srp_map_state *state,
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_send_wr *bad_wr;
-       struct ib_send_wr wr;
+       struct ib_reg_wr wr;
        struct srp_fr_desc *desc;
        u32 rkey;
+       int n, err;
 
        if (state->fr.next >= state->fr.end)
                return -ENOMEM;
 
+       WARN_ON_ONCE(!dev->use_fast_reg);
+
+       if (state->sg_nents == 0)
+               return 0;
+
+       if (state->sg_nents == 1 && target->global_mr) {
+               srp_map_desc(state, sg_dma_address(state->sg),
+                            sg_dma_len(state->sg),
+                            target->global_mr->rkey);
+               return 1;
+       }
+
        desc = srp_fr_pool_get(ch->fr_pool);
        if (!desc)
                return -ENOMEM;
@@ -1320,56 +1341,33 @@ static int srp_map_finish_fr(struct srp_map_state *state,
        rkey = ib_inc_rkey(desc->mr->rkey);
        ib_update_fast_reg_key(desc->mr, rkey);
 
-       memcpy(desc->frpl->page_list, state->pages,
-              sizeof(state->pages[0]) * state->npages);
-
-       memset(&wr, 0, sizeof(wr));
-       wr.opcode = IB_WR_FAST_REG_MR;
-       wr.wr_id = FAST_REG_WR_ID_MASK;
-       wr.wr.fast_reg.iova_start = state->base_dma_addr;
-       wr.wr.fast_reg.page_list = desc->frpl;
-       wr.wr.fast_reg.page_list_len = state->npages;
-       wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
-       wr.wr.fast_reg.length = state->dma_len;
-       wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
-                                      IB_ACCESS_REMOTE_READ |
-                                      IB_ACCESS_REMOTE_WRITE);
-       wr.wr.fast_reg.rkey = desc->mr->lkey;
+       n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
+                        dev->mr_page_size);
+       if (unlikely(n < 0))
+               return n;
+
+       wr.wr.next = NULL;
+       wr.wr.opcode = IB_WR_REG_MR;
+       wr.wr.wr_id = FAST_REG_WR_ID_MASK;
+       wr.wr.num_sge = 0;
+       wr.wr.send_flags = 0;
+       wr.mr = desc->mr;
+       wr.key = desc->mr->rkey;
+       wr.access = (IB_ACCESS_LOCAL_WRITE |
+                    IB_ACCESS_REMOTE_READ |
+                    IB_ACCESS_REMOTE_WRITE);
 
        *state->fr.next++ = desc;
        state->nmdesc++;
 
-       srp_map_desc(state, state->base_dma_addr, state->dma_len,
-                    desc->mr->rkey);
+       srp_map_desc(state, desc->mr->iova,
+                    desc->mr->length, desc->mr->rkey);
 
-       return ib_post_send(ch->qp, &wr, &bad_wr);
-}
+       err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
+       if (unlikely(err))
+               return err;
 
-static int srp_finish_mapping(struct srp_map_state *state,
-                             struct srp_rdma_ch *ch)
-{
-       struct srp_target_port *target = ch->target;
-       struct srp_device *dev = target->srp_host->srp_dev;
-       int ret = 0;
-
-       WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);
-
-       if (state->npages == 0)
-               return 0;
-
-       if (state->npages == 1 && target->global_mr)
-               srp_map_desc(state, state->base_dma_addr, state->dma_len,
-                            target->global_mr->rkey);
-       else
-               ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
-                       srp_map_finish_fmr(state, ch);
-
-       if (ret == 0) {
-               state->npages = 0;
-               state->dma_len = 0;
-       }
-
-       return ret;
+       return n;
 }
 
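This is the core of the conversion. Instead of copying state->pages[] into a
driver-owned ib_fast_reg_page_list and posting IB_WR_FAST_REG_MR, the driver
lets ib_map_mr_sg() walk the scatterlist into the MR's internal page list;
the call returns how many SG entries it could map, and a struct ib_reg_wr
posted as IB_WR_REG_MR performs the actual registration. A condensed sketch
of the pattern with generic names (mr, sg, qp; wr_id and SRP bookkeeping
omitted):

        struct ib_reg_wr wr = {};
        struct ib_send_wr *bad_wr;
        int n, err;

        /* Load the scatterlist into the MR's internal page list. */
        n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
        if (n < 0)
                return n;               /* nothing could be mapped */

        wr.wr.opcode = IB_WR_REG_MR;
        wr.mr = mr;
        wr.key = mr->rkey;
        wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
                    IB_ACCESS_REMOTE_WRITE;

        /* Post the registration; mr->iova and mr->length then describe
         * the registered range.
         */
        err = ib_post_send(qp, &wr.wr, &bad_wr);
        if (err)
                return err;

        return n;       /* entries consumed; the caller advances sg by n */

Returning the consumed count rather than zero is what lets srp_map_sg_fr()
below walk an arbitrarily long scatterlist with repeated registrations.
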
 static int srp_map_sg_entry(struct srp_map_state *state,
@@ -1389,7 +1387,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
        while (dma_len) {
                unsigned offset = dma_addr & ~dev->mr_page_mask;
                if (state->npages == dev->max_pages_per_mr || offset != 0) {
-                       ret = srp_finish_mapping(state, ch);
+                       ret = srp_map_finish_fmr(state, ch);
                        if (ret)
                                return ret;
                }
@@ -1411,51 +1409,83 @@ static int srp_map_sg_entry(struct srp_map_state *state,
         */
        ret = 0;
        if (len != dev->mr_page_size)
-               ret = srp_finish_mapping(state, ch);
+               ret = srp_map_finish_fmr(state, ch);
        return ret;
 }
 
-static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
-                     struct srp_request *req, struct scatterlist *scat,
-                     int count)
+static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+                         struct srp_request *req, struct scatterlist *scat,
+                         int count)
 {
-       struct srp_target_port *target = ch->target;
-       struct srp_device *dev = target->srp_host->srp_dev;
        struct scatterlist *sg;
        int i, ret;
 
-       state->desc     = req->indirect_desc;
-       state->pages    = req->map_page;
-       if (dev->use_fast_reg) {
-               state->fr.next = req->fr_list;
-               state->fr.end = req->fr_list + target->cmd_sg_cnt;
-       } else if (dev->use_fmr) {
-               state->fmr.next = req->fmr_list;
-               state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
-       }
+       state->desc = req->indirect_desc;
+       state->pages = req->map_page;
+       state->fmr.next = req->fmr_list;
+       state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
 
-       if (dev->use_fast_reg || dev->use_fmr) {
-               for_each_sg(scat, sg, count, i) {
-                       ret = srp_map_sg_entry(state, ch, sg, i);
-                       if (ret)
-                               goto out;
-               }
-               ret = srp_finish_mapping(state, ch);
+       for_each_sg(scat, sg, count, i) {
+               ret = srp_map_sg_entry(state, ch, sg, i);
                if (ret)
-                       goto out;
-       } else {
-               for_each_sg(scat, sg, count, i) {
-                       srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
-                                    ib_sg_dma_len(dev->dev, sg),
-                                    target->global_mr->rkey);
-               }
+                       return ret;
        }
 
+       ret = srp_map_finish_fmr(state, ch);
+       if (ret)
+               return ret;
+
        req->nmdesc = state->nmdesc;
-       ret = 0;
 
-out:
-       return ret;
+       return 0;
+}
+
+static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
+                        struct srp_request *req, struct scatterlist *scat,
+                        int count)
+{
+       state->desc = req->indirect_desc;
+       state->fr.next = req->fr_list;
+       state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
+       state->sg = scat;
+       state->sg_nents = scsi_sg_count(req->scmnd);
+
+       while (state->sg_nents) {
+               int i, n;
+
+               n = srp_map_finish_fr(state, ch);
+               if (unlikely(n < 0))
+                       return n;
+
+               state->sg_nents -= n;
+               for (i = 0; i < n; i++)
+                       state->sg = sg_next(state->sg);
+       }
+
+       req->nmdesc = state->nmdesc;
+
+       return 0;
+}
+
+static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
+                         struct srp_request *req, struct scatterlist *scat,
+                         int count)
+{
+       struct srp_target_port *target = ch->target;
+       struct srp_device *dev = target->srp_host->srp_dev;
+       struct scatterlist *sg;
+       int i;
+
+       state->desc = req->indirect_desc;
+       for_each_sg(scat, sg, count, i) {
+               srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
+                            ib_sg_dma_len(dev->dev, sg),
+                            target->global_mr->rkey);
+       }
+
+       req->nmdesc = state->nmdesc;
+
+       return 0;
 }
 
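srp_map_sg() is split into one helper per registration strategy:
srp_map_sg_fmr() keeps the page-accumulator walk via srp_map_sg_entry(),
srp_map_sg_fr() repeatedly registers runs of the scatterlist, and
srp_map_sg_dma() emits one descriptor per SG entry under the global rkey.
Note that srp_map_sg_fr() derives its entry count from
scsi_sg_count(req->scmnd) rather than the count argument. The FR loop's
invariant, restated from the code above:

        /* Each srp_map_finish_fr() call registers a prefix of the
         * remaining scatterlist and returns n >= 1 entries consumed
         * (or a negative errno), so sg_nents strictly decreases and
         * the walk terminates.
         */
        while (state->sg_nents) {
                int i, n = srp_map_finish_fr(state, ch);

                if (unlikely(n < 0))
                        return n;
                state->sg_nents -= n;
                for (i = 0; i < n; i++)
                        state->sg = sg_next(state->sg);
        }
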
 /*
@@ -1474,6 +1504,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
        struct srp_map_state state;
        struct srp_direct_buf idb_desc;
        u64 idb_pages[1];
+       struct scatterlist idb_sg[1];
        int ret;
 
        memset(&state, 0, sizeof(state));
@@ -1481,20 +1512,32 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
        state.gen.next = next_mr;
        state.gen.end = end_mr;
        state.desc = &idb_desc;
-       state.pages = idb_pages;
-       state.pages[0] = (req->indirect_dma_addr &
-                         dev->mr_page_mask);
-       state.npages = 1;
        state.base_dma_addr = req->indirect_dma_addr;
        state.dma_len = idb_len;
-       ret = srp_finish_mapping(&state, ch);
-       if (ret < 0)
-               goto out;
+
+       if (dev->use_fast_reg) {
+               state.sg = idb_sg;
+               state.sg_nents = 1;
+               sg_set_buf(idb_sg, req->indirect_desc, idb_len);
+               idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
+               ret = srp_map_finish_fr(&state, ch);
+               if (ret < 0)
+                       return ret;
+       } else if (dev->use_fmr) {
+               state.pages = idb_pages;
+               state.pages[0] = (req->indirect_dma_addr &
+                                 dev->mr_page_mask);
+               state.npages = 1;
+               ret = srp_map_finish_fmr(&state, ch);
+               if (ret < 0)
+                       return ret;
+       } else {
+               return -EINVAL;
+       }
 
        *idb_rkey = idb_desc.key;
 
-out:
-       return ret;
+       return 0;
 }
 
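For the indirect descriptor table, the fast-registration branch builds a
one-entry scatterlist by hand: sg_set_buf() fills the CPU-side
page/offset/length, and because the table was already DMA-mapped, the bus
address is patched in directly (the "hack!" flagged above) so that
ib_map_mr_sg() sees it. The setup in isolation:

        struct scatterlist idb_sg[1];

        sg_set_buf(idb_sg, req->indirect_desc, idb_len);
        /* Already DMA-mapped, so bypass dma_map_sg() and supply the
         * bus address directly, as the hunk above does.
         */
        idb_sg->dma_address = req->indirect_dma_addr;

On configurations with CONFIG_NEED_SG_DMA_LENGTH, the scatterlist carries a
separate dma_length field that sg_dma_len() reads; neither the hunk above
nor this sketch sets it.
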
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
@@ -1563,7 +1606,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
                                   target->indirect_size, DMA_TO_DEVICE);
 
        memset(&state, 0, sizeof(state));
-       srp_map_sg(&state, ch, req, scat, count);
+       if (dev->use_fast_reg)
+               srp_map_sg_fr(&state, ch, req, scat, count);
+       else if (dev->use_fmr)
+               srp_map_sg_fmr(&state, ch, req, scat, count);
+       else
+               srp_map_sg_dma(&state, ch, req, scat, count);
 
        /* We've mapped the request, now pull as much of the indirect
         * descriptor table as we can into the command buffer. If this
@@ -2750,7 +2798,6 @@ static struct scsi_host_template srp_template = {
        .cmd_per_lun                    = SRP_DEFAULT_CMD_SQ_SIZE,
        .use_clustering                 = ENABLE_CLUSTERING,
        .shost_attrs                    = srp_host_attrs,
-       .use_blk_tags                   = 1,
        .track_queue_depth              = 1,
 };
 
@@ -3181,10 +3228,6 @@ static ssize_t srp_create_target(struct device *dev,
        if (ret)
                goto out;
 
-       ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
-       if (ret)
-               goto out;
-
        target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
 
        if (!srp_conn_unique(target->srp_host, target)) {
@@ -3213,7 +3256,7 @@ static ssize_t srp_create_target(struct device *dev,
        INIT_WORK(&target->tl_err_work, srp_tl_err_work);
        INIT_WORK(&target->remove_work, srp_remove_work);
        spin_lock_init(&target->lock);
-       ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
+       ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
        if (ret)
                goto out;