diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 771e2e76187222dfb71616f5665c7b2b22802c74..e65041c640cbc5bad3c284b77690605f071edfc3 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1491,19 +1491,20 @@ static int
 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
        struct nvme_fc_queue *queue = &ctrl->queues[1];
-       int i, j, ret;
+       int i, ret;
 
        for (i = 1; i < ctrl->queue_count; i++, queue++) {
                ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
-               if (ret) {
-                       for (j = i-1; j >= 0; j--)
-                               __nvme_fc_delete_hw_queue(ctrl,
-                                               &ctrl->queues[j], j);
-                       return ret;
-               }
+               if (ret)
+                       goto delete_queues;
        }
 
        return 0;
+
+delete_queues:
+       for (; i >= 0; i--)
+               __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+       return ret;
 }
 
 static int
@@ -1653,23 +1654,22 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                struct nvme_fc_fcp_op *op)
 {
        struct nvmefc_fcp_req *freq = &op->fcp_req;
-       u32 map_len = nvme_map_len(rq);
        enum dma_data_direction dir;
        int ret;
 
        freq->sg_cnt = 0;
 
-       if (!map_len)
+       if (!blk_rq_payload_bytes(rq))
                return 0;
 
        freq->sg_table.sgl = freq->first_sgl;
-       ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
-                       freq->sg_table.sgl);
+       ret = sg_alloc_table_chained(&freq->sg_table,
+                       blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
        if (ret)
                return -ENOMEM;
 
        op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-       WARN_ON(op->nents > rq->nr_phys_segments);
+       WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
        dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, dir);
@@ -1853,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret)
                return ret;
 
-       data_len = nvme_map_len(rq);
+       data_len = blk_rq_payload_bytes(rq);
        if (data_len)
                io_dir = ((rq_data_dir(rq) == WRITE) ?
                                        NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
@@ -2401,8 +2401,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        WARN_ON_ONCE(!changed);
 
        dev_info(ctrl->ctrl.device,
-               "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
-               ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+               "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+               ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
 
        kref_get(&ctrl->ctrl.kref);