asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/nvme/host/rdma.c
nvme: make fabrics command run on a separate request queue
[linux.git] / drivers / nvme / host / rdma.c
index a249db528d543dcad37e9b5162cc4b196a2f41e1..0ef05a75c428f6aab949928061db54e20dcc2a89 100644 (file)
@@ -751,6 +751,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.admin_q);
+               blk_cleanup_queue(ctrl->ctrl.fabrics_q);
                blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
        }
        if (ctrl->async_event_sqe.data) {
@@ -792,10 +793,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
                        goto out_free_async_qe;
                }
 
+               ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+               if (IS_ERR(ctrl->ctrl.fabrics_q)) {
+                       error = PTR_ERR(ctrl->ctrl.fabrics_q);
+                       goto out_free_tagset;
+               }
+
                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
                if (IS_ERR(ctrl->ctrl.admin_q)) {
                        error = PTR_ERR(ctrl->ctrl.admin_q);
-                       goto out_free_tagset;
+                       goto out_cleanup_fabrics_q;
                }
        }
 
@@ -803,24 +810,15 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (error)
                goto out_cleanup_queue;
 
-       error = ctrl->ctrl.ops->reg_read64(&ctrl->ctrl, NVME_REG_CAP,
-                       &ctrl->ctrl.cap);
-       if (error) {
-               dev_err(ctrl->ctrl.device,
-                       "prop_get NVME_REG_CAP failed\n");
-               goto out_stop_queue;
-       }
-
-       ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
-
-       error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+       error = nvme_enable_ctrl(&ctrl->ctrl);
        if (error)
                goto out_stop_queue;
 
        ctrl->ctrl.max_hw_sectors =
                (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
 
+       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_stop_queue;
@@ -832,6 +830,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 out_cleanup_queue:
        if (new)
                blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_cleanup_fabrics_q:
+       if (new)
+               blk_cleanup_queue(ctrl->ctrl.fabrics_q);
 out_free_tagset:
        if (new)
                blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
@@ -901,10 +902,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        nvme_rdma_stop_queue(&ctrl->queues[0]);
-       if (ctrl->ctrl.admin_tagset)
+       if (ctrl->ctrl.admin_tagset) {
                blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
                        nvme_cancel_request, &ctrl->ctrl);
-       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+               blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
+       }
+       if (remove)
+               blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_rdma_destroy_admin_queue(ctrl, remove);
 }
 
@@ -914,9 +918,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                nvme_rdma_stop_io_queues(ctrl);
-               if (ctrl->ctrl.tagset)
+               if (ctrl->ctrl.tagset) {
                        blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
                                nvme_cancel_request, &ctrl->ctrl);
+                       blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
+               }
                if (remove)
                        nvme_start_queues(&ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, remove);
@@ -1053,6 +1059,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        nvme_rdma_teardown_io_queues(ctrl, false);
        nvme_start_queues(&ctrl->ctrl);
        nvme_rdma_teardown_admin_queue(ctrl, false);
+       blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                /* state change failure is ok if we're in DELETING state */
@@ -1541,16 +1548,18 @@ static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
 
 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
 {
+       struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
        int ret;
 
        ret = nvme_rdma_create_queue_ib(queue);
        if (ret)
                return ret;
 
+       if (ctrl->opts->tos >= 0)
+               rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
        ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
        if (ret) {
-               dev_err(queue->ctrl->ctrl.device,
-                       "rdma_resolve_route failed (%d).\n",
+               dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
                        queue->cm_error);
                goto out_destroy_queue;
        }
@@ -1863,10 +1872,11 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
        cancel_delayed_work_sync(&ctrl->reconnect_work);
 
        nvme_rdma_teardown_io_queues(ctrl, shutdown);
+       blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        if (shutdown)
                nvme_shutdown_ctrl(&ctrl->ctrl);
        else
-               nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+               nvme_disable_ctrl(&ctrl->ctrl);
        nvme_rdma_teardown_admin_queue(ctrl, shutdown);
 }
 
@@ -2045,7 +2055,8 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
        .required_opts  = NVMF_OPT_TRADDR,
        .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
                          NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
-                         NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES,
+                         NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
+                         NVMF_OPT_TOS,
        .create_ctrl    = nvme_rdma_create_ctrl,
 };