nvme-rdma: fix queue mapping when queue count is limited
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f383146e7d0fdfa40edb4e3671f1c208c082223c..26709a2ab5936567c0cfa0d15a96da951b40166d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
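A brief orientation before the hunks: the patch sizes each queue class up front
instead of deriving one class from another. What follows is a minimal userspace
sketch of that computation with hypothetical values (16 completion vectors,
32 online CPUs, 8/8/4 requested I/O, write and poll queues); min_u stands in
for the kernel's min()/min_t(), and nothing here is kernel API.

    /* Hypothetical stand-in for the sizing in the first hunk below. */
    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int num_comp_vectors = 16, online_cpus = 32;
        unsigned int opt_io = 8, opt_write = 8, opt_poll = 4;

        /* read/default targets are capped by completion vectors and CPUs */
        unsigned int nr_read_queues =
            min_u(num_comp_vectors, min_u(opt_io, online_cpus));
        unsigned int nr_default_queues =
            min_u(num_comp_vectors, min_u(opt_write, online_cpus));
        /* poll queues do not consume completion vectors */
        unsigned int nr_poll_queues = min_u(opt_poll, online_cpus);

        /* prints "requesting 20 queues" (8 + 8 + 4) */
        printf("requesting %u queues\n",
               nr_read_queues + nr_default_queues + nr_poll_queues);
        return 0;
    }

The sum of the three targets is what nvme_set_queue_count() then negotiates
with the controller; the second hunk distributes whatever comes back.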
@@ -641,34 +641,16 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        struct ib_device *ibdev = ctrl->device->dev;
-       unsigned int nr_io_queues;
+       unsigned int nr_io_queues, nr_default_queues;
+       unsigned int nr_read_queues, nr_poll_queues;
        int i, ret;
 
-       nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
-
-       /*
-        * we map queues according to the device irq vectors for
-        * optimal locality so we don't need more queues than
-        * completion vectors.
-        */
-       nr_io_queues = min_t(unsigned int, nr_io_queues,
-                               ibdev->num_comp_vectors);
-
-       if (opts->nr_write_queues) {
-               ctrl->io_queues[HCTX_TYPE_DEFAULT] =
-                               min(opts->nr_write_queues, nr_io_queues);
-               nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
-       } else {
-               ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
-       }
-
-       ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
-
-       if (opts->nr_poll_queues) {
-               ctrl->io_queues[HCTX_TYPE_POLL] =
-                       min(opts->nr_poll_queues, num_online_cpus());
-               nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
-       }
+       nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
+                               min(opts->nr_io_queues, num_online_cpus()));
+       nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
+                               min(opts->nr_write_queues, num_online_cpus()));
+       nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
+       nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
 
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret)
@@ -681,6 +663,34 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
        dev_info(ctrl->ctrl.device,
                "creating %d I/O queues.\n", nr_io_queues);
 
+       if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
+               /*
+                * separate read/write queues
+                * hand out dedicated default queues only after we have
+                * sufficient read queues.
+                */
+               ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
+               nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
+               ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+                       min(nr_default_queues, nr_io_queues);
+               nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+       } else {
+               /*
+                * shared read/write queues
+                * either no write queues were requested, or we don't have
+                * sufficient queue count to have dedicated default queues.
+                */
+               ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+                       min(nr_read_queues, nr_io_queues);
+               nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+       }
+
+       if (opts->nr_poll_queues && nr_io_queues) {
+               /* map dedicated poll queues only if we have queues left */
+               ctrl->io_queues[HCTX_TYPE_POLL] =
+                       min(nr_poll_queues, nr_io_queues);
+       }
+
        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                ret = nvme_rdma_alloc_queue(ctrl, i,
                                ctrl->ctrl.sqsize + 1);
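The distribution above is the heart of the fix: reads are satisfied first,
dedicated default queues come only out of what remains, and poll queues take
the rest. A minimal sketch of that arithmetic, continuing the hypothetical
8/8/4 request but assuming the controller granted only 10 queues (again
illustrative userspace C, not kernel API):

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        /* targets computed as in the first hunk (hypothetical values) */
        unsigned int opt_write = 8, opt_poll = 4;
        unsigned int nr_read_queues = 8, nr_default_queues = 8;
        unsigned int nr_poll_queues = 4;
        unsigned int nr_io_queues = 10;    /* controller granted fewer than 20 */
        unsigned int io_queues[3] = { 0, 0, 0 };    /* default/read/poll */

        if (opt_write && nr_read_queues < nr_io_queues) {
            /* reads first, then dedicated defaults from the remainder */
            io_queues[1] = nr_read_queues;
            nr_io_queues -= io_queues[1];
            io_queues[0] = min_u(nr_default_queues, nr_io_queues);
            nr_io_queues -= io_queues[0];
        } else {
            /* no dedicated defaults: read and default share queues */
            io_queues[0] = min_u(nr_read_queues, nr_io_queues);
            nr_io_queues -= io_queues[0];
        }
        if (opt_poll && nr_io_queues)
            io_queues[2] = min_u(nr_poll_queues, nr_io_queues);

        /* prints "default/read/poll = 2/8/0" */
        printf("default/read/poll = %u/%u/%u\n",
               io_queues[0], io_queues[1], io_queues[2]);
        return 0;
    }

Note the fallback: had the controller granted 8 or fewer queues, the else
branch would have let the read and default maps share them rather than
starving one class.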
@@ -1763,17 +1773,24 @@ static void nvme_rdma_complete_rq(struct request *rq)
 static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
        struct nvme_rdma_ctrl *ctrl = set->driver_data;
+       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
-       set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-       set->map[HCTX_TYPE_DEFAULT].nr_queues =
-                       ctrl->io_queues[HCTX_TYPE_DEFAULT];
-       set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
-       if (ctrl->ctrl.opts->nr_write_queues) {
+       if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
                /* separate read/write queues */
+               set->map[HCTX_TYPE_DEFAULT].nr_queues =
+                       ctrl->io_queues[HCTX_TYPE_DEFAULT];
+               set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+               set->map[HCTX_TYPE_READ].nr_queues =
+                       ctrl->io_queues[HCTX_TYPE_READ];
                set->map[HCTX_TYPE_READ].queue_offset =
-                               ctrl->io_queues[HCTX_TYPE_DEFAULT];
+                       ctrl->io_queues[HCTX_TYPE_DEFAULT];
        } else {
-               /* mixed read/write queues */
+               /* shared read/write queues */
+               set->map[HCTX_TYPE_DEFAULT].nr_queues =
+                       ctrl->io_queues[HCTX_TYPE_DEFAULT];
+               set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+               set->map[HCTX_TYPE_READ].nr_queues =
+                       ctrl->io_queues[HCTX_TYPE_DEFAULT];
                set->map[HCTX_TYPE_READ].queue_offset = 0;
        }
        blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1781,16 +1798,22 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
        blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
                        ctrl->device->dev, 0);
 
-       if (ctrl->ctrl.opts->nr_poll_queues) {
+       if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
+               /* map dedicated poll queues only if we have queues left */
                set->map[HCTX_TYPE_POLL].nr_queues =
                                ctrl->io_queues[HCTX_TYPE_POLL];
                set->map[HCTX_TYPE_POLL].queue_offset =
-                               ctrl->io_queues[HCTX_TYPE_DEFAULT];
-               if (ctrl->ctrl.opts->nr_write_queues)
-                       set->map[HCTX_TYPE_POLL].queue_offset +=
-                               ctrl->io_queues[HCTX_TYPE_READ];
+                       ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+                       ctrl->io_queues[HCTX_TYPE_READ];
                blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
        }
+
+       dev_info(ctrl->ctrl.device,
+               "mapped %d/%d/%d default/read/poll queues.\n",
+               ctrl->io_queues[HCTX_TYPE_DEFAULT],
+               ctrl->io_queues[HCTX_TYPE_READ],
+               ctrl->io_queues[HCTX_TYPE_POLL]);
+
        return 0;
 }
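Finally, the two mapping hunks key the hardware-context layout off the counts
actually allocated rather than the requested options (the old offsets were
derived from opts, which went stale once the controller trimmed the count):
the default map always starts at offset 0, the read map follows it only when
dedicated default queues exist, and the poll offset is now unconditionally
the sum of the default and read counts. A sketch of the resulting layout,
assuming the hypothetical 8/8/4 request was granted in full:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical: controller granted the full 8/8/4 request */
        unsigned int io_default = 8, io_read = 8, io_poll = 4;

        /* separate read/write queues: read map follows the default map */
        unsigned int default_offset = 0;
        unsigned int read_offset = io_default;
        /* poll queues always sit after both other classes */
        unsigned int poll_offset = io_default + io_read;

        printf("default: %u at %u, read: %u at %u, poll: %u at %u\n",
               io_default, default_offset, io_read, read_offset,
               io_poll, poll_offset);
        /*
         * in the shared case io_read is 0, so poll_offset degenerates to
         * io_default and the read map reuses offset 0
         */
        return 0;
    }

The new dev_info makes the outcome observable: with this layout the driver
would log "mapped 8/8/4 default/read/poll queues."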