diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 56d6142852a553ed9ad8011cb4c18a84e8656e0d..40660461a4b5c3e56e61b124385ee044547a4741 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,6 +13,7 @@
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -30,7 +31,7 @@ static int apidev_major;
 /*
  * SRB allocation cache
  */
-static struct kmem_cache *srb_cachep;
+struct kmem_cache *srb_cachep;
 
 /*
  * CT6 CTX allocation cache
@@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
                "Enables iIDMA settings "
                "Default is 1 - perform iIDMA. 0 - no iIDMA.");
 
-int ql2xmaxqueues = 1;
-module_param(ql2xmaxqueues, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmaxqueues,
-               "Enables MQ settings "
-               "Default is 1 for single queue. Set it to number "
-               "of queues in MQ mode.");
-
-int ql2xmultique_tag;
-module_param(ql2xmultique_tag, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmultique_tag,
-               "Enables CPU affinity settings for the driver "
-               "Default is 0 for no affinity of request and response IO. "
-               "Set it to 1 to turn on the cpu affinity.");
+int ql2xmqsupport = 1;
+module_param(ql2xmqsupport, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmqsupport,
+               "Enable on-demand multiple queue pairs support. "
+               "Default is 1 (enabled). "
+               "Set it to 0 to turn off mq qpair support.");
 
 int ql2xfwloadbin;
 module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
@@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
 static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
 
 struct scsi_host_template qla2xxx_driver_template = {
        .module                 = THIS_MODULE,
@@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = {
        .scan_finished          = qla2xxx_scan_finished,
        .scan_start             = qla2xxx_scan_start,
        .change_queue_depth     = scsi_change_queue_depth,
+       .map_queues             = qla2xxx_map_queues,
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
@@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
        struct req_que **, struct rsp_que **);
 static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+       struct qla_qpair *qpair);
 
 /* -------------------------------------------------------------------------- */
 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
@@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
                    "Unable to allocate memory for response queue ptrs.\n");
                goto fail_rsp_map;
        }
+
+       if (ql2xmqsupport && ha->max_qpairs) {
+               ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
+                       GFP_KERNEL);
+               if (!ha->queue_pair_map) {
+                       ql_log(ql_log_fatal, vha, 0x0180,
+                           "Unable to allocate memory for queue pair ptrs.\n");
+                       goto fail_qpair_map;
+               }
+               ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+               if (ha->base_qpair == NULL) {
+                       ql_log(ql_log_warn, vha, 0x0182,
+                           "Failed to allocate base queue pair memory.\n");
+                       goto fail_base_qpair;
+               }
+               ha->base_qpair->req = req;
+               ha->base_qpair->rsp = rsp;
+       }
+
        /*
         * Make sure we record at least the request and response queue zero in
         * case we need to free them if part of the probe fails.
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
        set_bit(0, ha->req_qid_map);
        return 1;
 
+fail_base_qpair:
+       kfree(ha->queue_pair_map);
+fail_qpair_map:
+       kfree(ha->rsp_q_map);
+       ha->rsp_q_map = NULL;
 fail_rsp_map:
        kfree(ha->req_q_map);
        ha->req_q_map = NULL;
@@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
        struct req_que *req;
        struct rsp_que *rsp;
        int cnt;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
                if (!test_bit(cnt, ha->req_qid_map))
                        continue;
 
                req = ha->req_q_map[cnt];
+               clear_bit(cnt, ha->req_qid_map);
+               ha->req_q_map[cnt] = NULL;
+
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_req_que(ha, req);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        kfree(ha->req_q_map);
        ha->req_q_map = NULL;
 
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
                if (!test_bit(cnt, ha->rsp_qid_map))
                        continue;
 
                rsp = ha->rsp_q_map[cnt];
+               clear_bit(cnt, ha->rsp_qid_map);
+               ha->rsp_q_map[cnt] = NULL;
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_rsp_que(ha, rsp);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        }
-       kfree(ha->rsp_q_map);
-       ha->rsp_q_map = NULL;
-}
-
-static int qla25xx_setup_mode(struct scsi_qla_host *vha)
-{
-       uint16_t options = 0;
-       int ques, req, ret;
-       struct qla_hw_data *ha = vha->hw;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (!(ha->fw_attributes & BIT_6)) {
-               ql_log(ql_log_warn, vha, 0x00d8,
-                   "Firmware is not multi-queue capable.\n");
-               goto fail;
-       }
-       if (ql2xmultique_tag) {
-               /* create a request queue for IO */
-               options |= BIT_7;
-               req = qla25xx_create_req_que(ha, options, 0, 0, -1,
-                       QLA_DEFAULT_QUE_QOS);
-               if (!req) {
-                       ql_log(ql_log_warn, vha, 0x00e0,
-                           "Failed to create request queue.\n");
-                       goto fail;
-               }
-               ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
-               vha->req = ha->req_q_map[req];
-               options |= BIT_1;
-               for (ques = 1; ques < ha->max_rsp_queues; ques++) {
-                       ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
-                       if (!ret) {
-                               ql_log(ql_log_warn, vha, 0x00e8,
-                                   "Failed to create response queue.\n");
-                               goto fail2;
-                       }
-               }
-               ha->flags.cpu_affinity_enabled = 1;
-               ql_dbg(ql_dbg_multiq, vha, 0xc007,
-                   "CPU affinity mode enabled, "
-                   "no. of response queues:%d no. of request queues:%d.\n",
-                   ha->max_rsp_queues, ha->max_req_queues);
-               ql_dbg(ql_dbg_init, vha, 0x00e9,
-                   "CPU affinity mode enabled, "
-                   "no. of response queues:%d no. of request queues:%d.\n",
-                   ha->max_rsp_queues, ha->max_req_queues);
-       }
-       return 0;
-fail2:
-       qla25xx_delete_queues(vha);
-       destroy_workqueue(ha->wq);
-       ha->wq = NULL;
-       vha->req = ha->req_q_map[0];
-fail:
-       ha->mqenable = 0;
-       kfree(ha->req_q_map);
        kfree(ha->rsp_q_map);
-       ha->max_req_queues = ha->max_rsp_queues = 1;
-       return 1;
+       ha->rsp_q_map = NULL;
 }
 
 static char *
@@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-static void
+void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
        struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
        cmd->scsi_done(cmd);
 }
 
+void
+qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct qla_hw_data *ha = sp->fcport->vha->hw;
+       void *ctx = GET_CMD_CTX_SP(sp);
+
+       if (sp->flags & SRB_DMA_VALID) {
+               scsi_dma_unmap(cmd);
+               sp->flags &= ~SRB_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+               dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+                   scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+               sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+               /* The list is assured to have elements */
+               qla2x00_clean_dsd_pool(ha, sp, NULL);
+               sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+               dma_pool_free(ha->dl_dma_pool, ctx,
+                   ((struct crc_context *)ctx)->crc_ctx_dma);
+               sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+               struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+               dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+                   ctx1->fcp_cmnd_dma);
+               list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+               ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+               ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+               mempool_free(ctx1, ha->ctx_mempool);
+       }
+
+       CMD_SP(cmd) = NULL;
+       qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+       cmd->result = res;
+
+       if (atomic_read(&sp->ref_count) == 0) {
+               ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+                   "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+                   sp, GET_CMD_SP(sp));
+               if (ql2xextended_error_logging & ql_dbg_io)
+                       WARN_ON(atomic_read(&sp->ref_count) == 0);
+               return;
+       }
+       if (!atomic_dec_and_test(&sp->ref_count))
+               return;
+
+       qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+       cmd->scsi_done(cmd);
+}
+
 /* If we are SP1 here, we need to still take and release the host_lock as SP1
  * does not have the changes necessary to avoid taking host->host_lock.
  */
@@ -706,12 +758,28 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        srb_t *sp;
        int rval;
+       struct qla_qpair *qpair = NULL;
+       uint32_t tag;
+       uint16_t hwq;
 
        if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
                cmd->result = DID_NO_CONNECT << 16;
                goto qc24_fail_command;
        }
 
+       if (ha->mqenable) {
+               if (shost_use_blk_mq(vha->host)) {
+                       tag = blk_mq_unique_tag(cmd->request);
+                       hwq = blk_mq_unique_tag_to_hwq(tag);
+                       qpair = ha->queue_pair_map[hwq];
+               } else if (vha->vp_idx && vha->qpair) {
+                       qpair = vha->qpair;
+               }
+
+               if (qpair)
+                       return qla2xxx_mqueuecommand(host, cmd, qpair);
+       }
+
        if (ha->flags.eeh_busy) {
                if (ha->flags.pci_channel_io_perm_failure) {
                        ql_dbg(ql_dbg_aer, vha, 0x9010,
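
The hwq lookup in the hunk above relies on the generic blk-mq tag encoding: blk_mq_unique_tag() packs the hardware-queue index into the upper 16 bits of the tag it returns, and blk_mq_unique_tag_to_hwq() shifts that index back out, which is what lets ha->queue_pair_map be indexed directly by hwq. A minimal sketch of the decoding (an illustration of the <linux/blk-mq.h> helpers, not code from this patch):

        /* unique_tag layout: bits [31:16] = hardware queue index, [15:0] = per-queue tag */
        u32 unique_tag = blk_mq_unique_tag(cmd->request);
        u16 hwq = unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;   /* == blk_mq_unique_tag_to_hwq(unique_tag) */
        struct qla_qpair *qpair = ha->queue_pair_map[hwq];
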
@@ -808,6 +876,95 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        return 0;
 }
 
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+    struct qla_qpair *qpair)
+{
+       scsi_qla_host_t *vha = shost_priv(host);
+       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+       struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+       struct qla_hw_data *ha = vha->hw;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+       srb_t *sp;
+       int rval;
+
+       rval = fc_remote_port_chkready(rport);
+       if (rval) {
+               cmd->result = rval;
+               ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+                   "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+                   cmd, rval);
+               goto qc24_fail_command;
+       }
+
+       if (!fcport) {
+               cmd->result = DID_NO_CONNECT << 16;
+               goto qc24_fail_command;
+       }
+
+       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+               if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+                       atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+                       ql_dbg(ql_dbg_io, vha, 0x3077,
+                           "Returning DNC, fcport_state=%d loop_state=%d.\n",
+                           atomic_read(&fcport->state),
+                           atomic_read(&base_vha->loop_state));
+                       cmd->result = DID_NO_CONNECT << 16;
+                       goto qc24_fail_command;
+               }
+               goto qc24_target_busy;
+       }
+
+       /*
+        * Return target busy if we've received a non-zero retry_delay_timer
+        * in a FCP_RSP.
+        */
+       if (fcport->retry_delay_timestamp == 0) {
+               /* retry delay not set */
+       } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+               fcport->retry_delay_timestamp = 0;
+       else
+               goto qc24_target_busy;
+
+       sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+       if (!sp)
+               goto qc24_host_busy;
+
+       sp->u.scmd.cmd = cmd;
+       sp->type = SRB_SCSI_CMD;
+       atomic_set(&sp->ref_count, 1);
+       CMD_SP(cmd) = (void *)sp;
+       sp->free = qla2xxx_qpair_sp_free_dma;
+       sp->done = qla2xxx_qpair_sp_compl;
+       sp->qpair = qpair;
+
+       rval = ha->isp_ops->start_scsi_mq(sp);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+                   "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+               if (rval == QLA_INTERFACE_ERROR)
+                       goto qc24_fail_command;
+               goto qc24_host_busy_free_sp;
+       }
+
+       return 0;
+
+qc24_host_busy_free_sp:
+       qla2xxx_qpair_sp_free_dma(vha, sp);
+
+qc24_host_busy:
+       return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+       return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+       cmd->scsi_done(cmd);
+
+       return 0;
+}
+
 /*
  * qla2x00_eh_wait_on_command
  *    Waits for the command to be returned by the Firmware for some
@@ -1459,7 +1616,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                                /* Don't abort commands in adapter during EEH
                                 * recovery as it's not accessible/responding.
                                 */
-                               if (!ha->flags.eeh_busy) {
+                               if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
                                        /* Get a reference to the sp and drop the lock.
                                         * The reference ensures this sp->done() call
                                         * - and not the call in qla2xxx_eh_abort() -
@@ -1601,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 {
        resource_size_t pio;
        uint16_t msix;
-       int cpus;
 
        if (pci_request_selected_regions(ha->pdev, ha->bars,
            QLA2XXX_DRIVER_NAME)) {
@@ -1658,9 +1814,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 
        /* Determine queue resources */
        ha->max_req_queues = ha->max_rsp_queues = 1;
-       if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
-               (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
-               (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+       if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
                goto mqiobase_exit;
 
        ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1670,26 +1824,18 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
                    "MQIO Base=%p.\n", ha->mqiobase);
                /* Read MSIX vector size of the board */
                pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
-               ha->msix_count = msix;
+               ha->msix_count = msix + 1;
                /* Max queues are bounded by available msix vectors */
-               /* queue 0 uses two msix vectors */
-               if (ql2xmultique_tag) {
-                       cpus = num_online_cpus();
-                       ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                               (cpus + 1) : (ha->msix_count - 1);
-                       ha->max_req_queues = 2;
-               } else if (ql2xmaxqueues > 1) {
-                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                           QLA_MQ_SIZE : ql2xmaxqueues;
-                       ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-               }
+               /* MB interrupt uses 1 vector */
+               ha->max_req_queues = ha->msix_count - 1;
+               ha->max_rsp_queues = ha->max_req_queues;
+               /* Queue pair count is the max value minus the base queue pair */
+               ha->max_qpairs = ha->max_rsp_queues - 1;
+               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
+                   "Max no of queue pairs: %d.\n", ha->max_qpairs);
+
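
For context on the msix + 1 change: the PCI MSI-X Message Control register encodes the table size as N - 1, so the value read back is one less than the number of vectors the function advertises. A worked example under that assumption, using illustrative numbers that are not taken from the patch:

        /* 32-entry MSI-X table on an ISP25xx/81xx:
         *   msix           = 31             (Table Size field, N - 1 encoded)
         *   msix_count     = 31 + 1 = 32
         *   max_req_queues = 32 - 1 = 31    (one vector reserved for the default/MB interrupt)
         *   max_rsp_queues = 31
         *   max_qpairs     = 31 - 1 = 30    (the base queue pair is not counted)
         */
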
                ql_log_pci(ql_log_info, ha->pdev, 0x001a,
-                   "MSI-X vector count: %d.\n", msix);
+                   "MSI-X vector count: %d.\n", ha->msix_count);
        } else
                ql_log_pci(ql_log_info, ha->pdev, 0x001b,
                    "BAR 3 not enabled.\n");
@@ -1709,7 +1855,6 @@ static int
 qla83xx_iospace_config(struct qla_hw_data *ha)
 {
        uint16_t msix;
-       int cpus;
 
        if (pci_request_selected_regions(ha->pdev, ha->bars,
            QLA2XXX_DRIVER_NAME)) {
@@ -1761,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
                /* Read MSIX vector size of the board */
                pci_read_config_word(ha->pdev,
                    QLA_83XX_PCI_MSIX_CONTROL, &msix);
-               ha->msix_count = msix;
-               /* Max queues are bounded by available msix vectors */
-               /* queue 0 uses two msix vectors */
-               if (ql2xmultique_tag) {
-                       cpus = num_online_cpus();
-                       ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                               (cpus + 1) : (ha->msix_count - 1);
-                       ha->max_req_queues = 2;
-               } else if (ql2xmaxqueues > 1) {
-                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                                               QLA_MQ_SIZE : ql2xmaxqueues;
-                       ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
+               ha->msix_count = msix + 1;
+               /*
+                * By default, the driver uses at least two MSI-X vectors
+                * (default & rspq).
+                */
+               if (ql2xmqsupport) {
+                       /* MB interrupt uses 1 vector */
+                       ha->max_req_queues = ha->msix_count - 1;
+                       ha->max_rsp_queues = ha->max_req_queues;
+
+                       /* ATIOQ needs 1 vector. That's 1 less QPair */
+                       if (QLA_TGT_MODE_ENABLED())
+                               ha->max_req_queues--;
+
+                       /* Queue pair count is the max value minus
+                        * the base queue pair */
+                       ha->max_qpairs = ha->max_req_queues - 1;
+                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+                           "Max no of queue pairs: %d.\n", ha->max_qpairs);
                }
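
The ISP83xx path applies the same arithmetic but gives up one more request queue for the ATIO queue vector when target mode is compiled in; continuing the illustrative 32-vector example:

        /* QLA_TGT_MODE_ENABLED() on an ISP83xx with a 32-entry MSI-X table:
         *   msix_count     = 31 + 1 = 32
         *   max_req_queues = 32 - 1 = 31, then - 1 for the ATIOQ vector = 30
         *   max_rsp_queues = 31             (assigned before the ATIOQ adjustment)
         *   max_qpairs     = 30 - 1 = 29    (the base queue pair is not counted)
         */
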
                ql_log_pci(ql_log_info, ha->pdev, 0x011c,
-                   "MSI-X vector count: %d.\n", msix);
+                   "MSI-X vector count: %d.\n", ha->msix_count);
        } else
                ql_log_pci(ql_log_info, ha->pdev, 0x011e,
                    "BAR 1 not enabled.\n");
 
 mqiobase_exit:
        ha->msix_count = ha->max_rsp_queues + 1;
+       if (QLA_TGT_MODE_ENABLED())
+               ha->msix_count++;
 
        qlt_83xx_iospace_config(ha);
 
@@ -1831,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = {
        .write_optrom           = qla2x00_write_optrom_data,
        .get_flash_version      = qla2x00_get_flash_version,
        .start_scsi             = qla2x00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1869,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = {
        .write_optrom           = qla2x00_write_optrom_data,
        .get_flash_version      = qla2x00_get_flash_version,
        .start_scsi             = qla2x00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1907,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1945,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1983,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2021,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = {
        .write_optrom           = qla82xx_write_optrom_data,
        .get_flash_version      = qla82xx_get_flash_version,
        .start_scsi             = qla82xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla82xx_abort_isp,
        .iospace_config         = qla82xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2059,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = {
        .write_optrom           = qla8044_write_optrom_data,
        .get_flash_version      = qla82xx_get_flash_version,
        .start_scsi             = qla82xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla8044_abort_isp,
        .iospace_config         = qla82xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2097,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla83xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2135,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qlafx00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qlafx00_abort_isp,
        .iospace_config         = qlafx00_iospace_config,
        .initialize_adapter     = qlafx00_initialize_adapter,
@@ -2173,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla83xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2387,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        uint16_t req_length = 0, rsp_length = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
+       int i;
+
        bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
        sht = &qla2xxx_driver_template;
        if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2650,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            "Found an ISP%04X irq %d iobase 0x%p.\n",
            pdev->device, pdev->irq, ha->iobase);
        mutex_init(&ha->vport_lock);
+       mutex_init(&ha->mq_lock);
        init_completion(&ha->mbx_cmd_comp);
        complete(&ha->mbx_cmd_comp);
        init_completion(&ha->mbx_intr_comp);
@@ -2737,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->max_cmd_len, host->max_channel, host->max_lun,
            host->transportt, sht->vendor_id);
 
-que_init:
+       /* Set up the irqs */
+       ret = qla2x00_request_irqs(ha, rsp);
+       if (ret)
+               goto probe_init_failed;
+
        /* Alloc arrays of request and response ring ptrs */
        if (!qla2x00_alloc_queues(ha, req, rsp)) {
                ql_log(ql_log_fatal, base_vha, 0x003d,
@@ -2746,12 +2912,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto probe_init_failed;
        }
 
-       qlt_probe_one_stage1(base_vha, ha);
+       if (ha->mqenable && shost_use_blk_mq(host)) {
+               /* number of hardware queues supported by blk/scsi-mq */
+               host->nr_hw_queues = ha->max_qpairs;
 
-       /* Set up the irqs */
-       ret = qla2x00_request_irqs(ha, rsp);
-       if (ret)
-               goto probe_init_failed;
+               ql_dbg(ql_dbg_init, base_vha, 0x0192,
+                       "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+       } else
+               ql_dbg(ql_dbg_init, base_vha, 0x0193,
+                       "blk/scsi-mq disabled.\n");
+
+       qlt_probe_one_stage1(base_vha, ha);
 
        pci_save_state(pdev);
 
@@ -2842,11 +3013,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->can_queue, base_vha->req,
            base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
-       if (ha->mqenable) {
-               if (qla25xx_setup_mode(base_vha)) {
-                       ql_log(ql_log_warn, base_vha, 0x00ec,
-                           "Failed to create queues, falling back to single queue mode.\n");
-                       goto que_init;
+       if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+               ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+               /* Create start of day qpairs for Block MQ */
+               if (shost_use_blk_mq(host)) {
+                       for (i = 0; i < ha->max_qpairs; i++)
+                               qla2xxx_create_qpair(base_vha, 5, 0);
                }
        }
 
@@ -3115,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
 static void
 qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
 {
-       /* Flush the work queue and remove it */
-       if (ha->wq) {
-               flush_workqueue(ha->wq);
-               destroy_workqueue(ha->wq);
-               ha->wq = NULL;
-       }
-
        /* Cancel all work and destroy DPC workqueues */
        if (ha->dpc_lp_wq) {
                cancel_work_sync(&ha->idc_aen);
@@ -3317,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha)
                ha->isp_ops->disable_intrs(ha);
        }
 
+       qla2x00_free_fcports(vha);
+
        qla2x00_free_irqs(vha);
 
-       qla2x00_free_fcports(vha);
+       /* Flush the work queue and remove it */
+       if (ha->wq) {
+               flush_workqueue(ha->wq);
+               destroy_workqueue(ha->wq);
+               ha->wq = NULL;
+       }
+
 
        qla2x00_mem_free(ha);
 
@@ -3489,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                                sizeof(struct ct6_dsd), 0,
                                SLAB_HWCACHE_ALIGN, NULL);
                        if (!ctx_cachep)
-                               goto fail_free_gid_list;
+                               goto fail_free_srb_mempool;
                }
                ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
                        ctx_cachep);
@@ -3642,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
            GFP_KERNEL);
        if (!ha->loop_id_map)
-               goto fail_async_pd;
+               goto fail_loop_id_map;
        else {
                qla2x00_set_reserved_loop_ids(ha);
                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3651,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 
        return 0;
 
+fail_loop_id_map:
+       dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
 fail_async_pd:
        dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
 fail_ex_init_cb:
@@ -3678,6 +3853,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
        ha->ms_iocb = NULL;
        ha->ms_iocb_dma = 0;
+
+       if (ha->sns_cmd)
+               dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+                   ha->sns_cmd, ha->sns_cmd_dma);
 fail_dma_pool:
        if (IS_QLA82XX(ha) || ql2xenabledif) {
                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3695,10 +3874,12 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        kfree(ha->nvram);
        ha->nvram = NULL;
 fail_free_ctx_mempool:
-       mempool_destroy(ha->ctx_mempool);
+       if (ha->ctx_mempool)
+               mempool_destroy(ha->ctx_mempool);
        ha->ctx_mempool = NULL;
 fail_free_srb_mempool:
-       mempool_destroy(ha->srb_mempool);
+       if (ha->srb_mempool)
+               mempool_destroy(ha->srb_mempool);
        ha->srb_mempool = NULL;
 fail_free_gid_list:
        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
@@ -4034,6 +4215,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
        INIT_LIST_HEAD(&vha->logo_list);
        INIT_LIST_HEAD(&vha->plogi_ack_list);
+       INIT_LIST_HEAD(&vha->qp_list);
 
        spin_lock_init(&vha->work_lock);
        spin_lock_init(&vha->cmd_list_lock);
@@ -5038,8 +5220,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 
        base_vha->flags.init_done = 0;
        qla25xx_delete_queues(base_vha);
-       qla2x00_free_irqs(base_vha);
        qla2x00_free_fcports(base_vha);
+       qla2x00_free_irqs(base_vha);
        qla2x00_mem_free(ha);
        qla82xx_md_free(base_vha);
        qla2x00_free_queues(ha);
@@ -5073,6 +5255,8 @@ qla2x00_do_dpc(void *data)
 {
        scsi_qla_host_t *base_vha;
        struct qla_hw_data *ha;
+       uint32_t online;
+       struct qla_qpair *qpair;
 
        ha = (struct qla_hw_data *)data;
        base_vha = pci_get_drvdata(ha->pdev);
@@ -5334,6 +5518,22 @@ qla2x00_do_dpc(void *data)
                                ha->isp_ops->beacon_blink(base_vha);
                }
 
+               /* qpair online check */
+               if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+                   &base_vha->dpc_flags)) {
+                       if (ha->flags.eeh_busy ||
+                           ha->flags.pci_channel_io_perm_failure)
+                               online = 0;
+                       else
+                               online = 1;
+
+                       mutex_lock(&ha->mq_lock);
+                       list_for_each_entry(qpair, &base_vha->qp_list,
+                           qp_list_elem)
+                               qpair->online = online;
+                       mutex_unlock(&ha->mq_lock);
+               }
+
                if (!IS_QLAFX00(ha))
                        qla2x00_do_dpc_all_vps(base_vha);
 
@@ -5676,6 +5876,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
        switch (state) {
        case pci_channel_io_normal:
                ha->flags.eeh_busy = 0;
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                ha->flags.eeh_busy = 1;
@@ -5689,10 +5893,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                pci_disable_device(pdev);
                /* Return back all IOs */
                qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                ha->flags.pci_channel_io_perm_failure = 1;
                qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_DISCONNECT;
        }
        return PCI_ERS_RESULT_NEED_RESET;
@@ -5960,6 +6172,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
        qla83xx_wr_reg(vha, reg, data);
 }
 
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+       scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+       return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+}
+
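
blk_mq_pci_map_queues() is the generic block-layer helper that binds each hardware context to the CPUs in the affinity mask of the matching PCI (MSI-X) vector, so a command is submitted on a CPU that services the corresponding qpair interrupt. A rough sketch of the mapping it performs on shost->tag_set, modeled on the block/blk-mq-pci.c helper of this kernel generation (not part of this patch):

        int queue, cpu;

        for (queue = 0; queue < set->nr_hw_queues; queue++) {
                const struct cpumask *mask = pci_irq_get_affinity(pdev, queue);

                if (!mask)                              /* no affinity info for this vector */
                        goto fallback;
                for_each_cpu(cpu, mask)
                        set->mq_map[cpu] = queue;       /* CPU -> hardware queue */
        }
        return 0;

fallback:
        for_each_possible_cpu(cpu)                      /* degenerate case: everything on queue 0 */
                set->mq_map[cpu] = 0;
        return 0;
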
 static const struct pci_error_handlers qla2xxx_err_handler = {
        .error_detected = qla2xxx_pci_error_detected,
        .mmio_enabled = qla2xxx_pci_mmio_enabled,