scsi: lpfc: Fix error codes in lpfc_sli4_pci_mem_setup()
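This blobdiff shows the lpfc_init.c delta between the two blob revisions below. Most of the churn comes from the SLI4 queue rework visible in the hunks: EQ delay tuning moves out of lpfc_hb_timeout_handler() into a new delayed work item (lpfc_hb_eq_delay_work) driven by per-CPU interrupt counters, IRQ vector sizing is split into cfg_irq_chann, CPU affinity is derived from pci_irq_get_affinity(), and a CQ lookup table (lpfc_setup_cq_lookup) is introduced. The change named in the title makes lpfc_sli4_pci_mem_setup() return -ENODEV/-ENOMEM directly from each failure path instead of falling through with a stale 'error' value.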
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d9db29817f6b190f5e26fd3504727d81410164b6..bbc2815fc0128a745484c7dc3fafd2330174f781 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -37,6 +37,7 @@
 #include <linux/miscdevice.h>
 #include <linux/percpu.h>
 #include <linux/msi.h>
+#include <linux/irq.h>
 #include <linux/bitops.h>
 
 #include <scsi/scsi.h>
@@ -92,6 +93,8 @@ static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+static uint16_t lpfc_find_eq_handle(struct lpfc_hba *, uint16_t);
+static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1248,6 +1251,69 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
        return;
 }
 
+static void
+lpfc_hb_eq_delay_work(struct work_struct *work)
+{
+       struct lpfc_hba *phba = container_of(to_delayed_work(work),
+                                            struct lpfc_hba, eq_delay_work);
+       struct lpfc_eq_intr_info *eqi, *eqi_new;
+       struct lpfc_queue *eq, *eq_next;
+       unsigned char *eqcnt = NULL;
+       uint32_t usdelay;
+       int i;
+
+       if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
+               return;
+
+       if (phba->link_state == LPFC_HBA_ERROR ||
+           phba->pport->fc_flag & FC_OFFLINE_MODE)
+               goto requeue;
+
+       eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
+                       GFP_KERNEL);
+       if (!eqcnt)
+               goto requeue;
+
+       for (i = 0; i < phba->cfg_irq_chann; i++) {
+               eq = phba->sli4_hba.hdwq[i].hba_eq;
+               if (eq && eqcnt[eq->last_cpu] < 2)
+                       eqcnt[eq->last_cpu]++;
+               continue;
+       }
+
+       for_each_present_cpu(i) {
+               if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2)
+                       continue;
+
+               eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+
+               usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
+                          LPFC_EQ_DELAY_STEP;
+               if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+                       usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+
+               eqi->icnt = 0;
+
+               list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
+                       if (eq->last_cpu != i) {
+                               eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
+                                                     eq->last_cpu);
+                               list_move_tail(&eq->cpu_list, &eqi_new->list);
+                               continue;
+                       }
+                       if (usdelay != eq->q_mode)
+                               lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
+                                                        usdelay);
+               }
+       }
+
+       kfree(eqcnt);
+
+requeue:
+       queue_delayed_work(phba->wq, &phba->eq_delay_work,
+                          msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
+}
+
 /**
  * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
  * @phba: pointer to lpfc hba data structure.
@@ -1300,16 +1366,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
        int retval, i;
        struct lpfc_sli *psli = &phba->sli;
        LIST_HEAD(completions);
-       struct lpfc_queue *qp;
-       unsigned long time_elapsed;
-       uint32_t tick_cqe, max_cqe, val;
-       uint64_t tot, data1, data2, data3;
-       struct lpfc_nvmet_tgtport *tgtp;
-       struct lpfc_register reg_data;
-       struct nvme_fc_local_port *localport;
-       struct lpfc_nvme_lport *lport;
-       struct lpfc_fc4_ctrl_stat *cstat;
-       void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
 
        if (phba->cfg_xri_rebalancing) {
                /* Multi-XRI pools handler */
@@ -1329,104 +1385,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return;
 
-       if (phba->cfg_auto_imax) {
-               if (!phba->last_eqdelay_time) {
-                       phba->last_eqdelay_time = jiffies;
-                       goto skip_eqdelay;
-               }
-               time_elapsed = jiffies - phba->last_eqdelay_time;
-               phba->last_eqdelay_time = jiffies;
-
-               tot = 0xffff;
-               /* Check outstanding IO count */
-               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-                       if (phba->nvmet_support) {
-                               tgtp = phba->targetport->private;
-                               /* Calculate outstanding IOs */
-                               tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
-                               tot += atomic_read(&tgtp->xmt_fcp_release);
-                               tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
-                       } else {
-                               localport = phba->pport->localport;
-                               if (!localport || !localport->private)
-                                       goto skip_eqdelay;
-                               lport = (struct lpfc_nvme_lport *)
-                                       localport->private;
-                               tot = 0;
-                               for (i = 0;
-                                       i < phba->cfg_hdw_queue; i++) {
-                                       cstat =
-                                            &phba->sli4_hba.hdwq[i].nvme_cstat;
-                                       data1 = cstat->input_requests;
-                                       data2 = cstat->output_requests;
-                                       data3 = cstat->control_requests;
-                                       tot += (data1 + data2 + data3);
-                                       tot -= cstat->io_cmpls;
-                               }
-                       }
-               }
-
-               /* Interrupts per sec per EQ */
-               val = phba->cfg_fcp_imax / phba->cfg_hdw_queue;
-               tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
-
-               /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
-               max_cqe = time_elapsed * tick_cqe;
-
-               for (i = 0; i < phba->cfg_hdw_queue; i++) {
-                       /* Fast-path EQ */
-                       qp = phba->sli4_hba.hdwq[i].hba_eq;
-                       if (!qp)
-                               continue;
-
-                       /* Use no EQ delay if we don't have many outstanding
-                        * IOs, or if we are only processing 1 CQE/ISR or less.
-                        * Otherwise, assume we can process up to lpfc_fcp_imax
-                        * interrupts per HBA.
-                        */
-                       if (tot < LPFC_NODELAY_MAX_IO ||
-                           qp->EQ_cqe_cnt <= max_cqe)
-                               val = 0;
-                       else
-                               val = phba->cfg_fcp_imax;
-
-                       if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
-                               /* Use EQ Delay Register method */
-
-                               /* Convert for EQ Delay register */
-                               if (val) {
-                                       /* First, interrupts per sec per EQ */
-                                       val = phba->cfg_fcp_imax /
-                                               phba->cfg_hdw_queue;
-
-                                       /* us delay between each interrupt */
-                                       val = LPFC_SEC_TO_USEC / val;
-                               }
-                               if (val != qp->q_mode) {
-                                       reg_data.word0 = 0;
-                                       bf_set(lpfc_sliport_eqdelay_id,
-                                              &reg_data, qp->queue_id);
-                                       bf_set(lpfc_sliport_eqdelay_delay,
-                                              &reg_data, val);
-                                       writel(reg_data.word0, eqdreg);
-                               }
-                       } else {
-                               /* Use mbox command method */
-                               if (val != qp->q_mode)
-                                       lpfc_modify_hba_eq_delay(phba, i,
-                                                                1, val);
-                       }
-
-                       /*
-                        * val is cfg_fcp_imax or 0 for mbox delay or us delay
-                        * between interrupts for EQDR.
-                        */
-                       qp->q_mode = val;
-                       qp->EQ_cqe_cnt = 0;
-               }
-       }
-
-skip_eqdelay:
        spin_lock_irq(&phba->pport->work_port_lock);
 
        if (time_after(phba->last_completion_time +
@@ -2983,6 +2941,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
 {
        if (phba->pport)
                lpfc_stop_vport_timers(phba->pport);
+       cancel_delayed_work_sync(&phba->eq_delay_work);
        del_timer_sync(&phba->sli.mbox_tmo);
        del_timer_sync(&phba->fabric_block_timer);
        del_timer_sync(&phba->eratt_poll);
@@ -4131,7 +4090,7 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
        /* Sanity check to ensure our sizing is right for both SCSI and NVME */
        if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                               "6426 Common buffer size %ld exceeds %d\n",
+                               "6426 Common buffer size %zd exceeds %d\n",
                                sizeof(struct lpfc_io_buf),
                                LPFC_COMMON_IO_BUF_SZ);
                return 0;
@@ -4201,6 +4160,7 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
                lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
                lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
                lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+               spin_lock_init(&lpfc_ncmd->buf_lock);
 
                /* add the nvme buffer to a post list */
                list_add_tail(&lpfc_ncmd->list, &post_nblist);
@@ -4330,13 +4290,21 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        vport->fc_rscn_flush = 0;
        lpfc_get_vport_cfgparam(vport);
 
+       /* Adjust value in vport */
+       vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
+
        shost->unique_id = instance;
        shost->max_id = LPFC_MAX_TARGET;
        shost->max_lun = vport->cfg_max_luns;
        shost->this_id = -1;
        shost->max_cmd_len = 16;
+
        if (phba->sli_rev == LPFC_SLI_REV4) {
-               shost->nr_hw_queues = phba->cfg_hdw_queue;
+               if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
+                       shost->nr_hw_queues = phba->cfg_hdw_queue;
+               else
+                       shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+
                shost->dma_boundary =
                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
                shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
@@ -6226,6 +6194,8 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
        /* Heartbeat timer */
        timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
 
+       INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
+
        return 0;
 }
 
@@ -6407,8 +6377,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        u32 if_type;
        u32 if_fam;
 
-       phba->sli4_hba.num_online_cpu = num_online_cpus();
        phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+       phba->sli4_hba.num_possible_cpu = num_possible_cpus();
        phba->sli4_hba.curr_disp_cpu = 0;
 
        /* Get all the module params for configuring this host */
@@ -6819,7 +6789,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_remove_rpi_hdrs;
        }
 
-       phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_hdw_queue,
+       phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
                                            sizeof(struct lpfc_hba_eq_hdl),
                                            GFP_KERNEL);
        if (!phba->sli4_hba.hba_eq_hdl) {
@@ -6830,7 +6800,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_fcf_rr_bmask;
        }
 
-       phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
+       phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
                                        sizeof(struct lpfc_vector_map_info),
                                        GFP_KERNEL);
        if (!phba->sli4_hba.cpu_map) {
@@ -6841,6 +6811,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_hba_eq_hdl;
        }
 
+       phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
+       if (!phba->sli4_hba.eq_info) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3321 Failed allocation for per_cpu stats\n");
+               rc = -ENOMEM;
+               goto out_free_hba_cpu_map;
+       }
        /*
         * Enable sr-iov virtual functions if supported and configured
         * through the module parameter.
@@ -6860,6 +6837,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        return 0;
 
+out_free_hba_cpu_map:
+       kfree(phba->sli4_hba.cpu_map);
 out_free_hba_eq_hdl:
        kfree(phba->sli4_hba.hba_eq_hdl);
 out_free_fcf_rr_bmask:
@@ -6889,10 +6868,12 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 {
        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
+       free_percpu(phba->sli4_hba.eq_info);
+
        /* Free memory allocated for msi-x interrupt vector to CPU mapping */
        kfree(phba->sli4_hba.cpu_map);
+       phba->sli4_hba.num_possible_cpu = 0;
        phba->sli4_hba.num_present_cpu = 0;
-       phba->sli4_hba.num_online_cpu = 0;
        phba->sli4_hba.curr_disp_cpu = 0;
 
        /* Free memory allocated for fast-path work queue handles */
@@ -8257,7 +8238,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
        struct lpfc_rsrc_desc_fcfcoe *desc;
        char *pdesc_0;
        uint16_t forced_link_speed;
-       uint32_t if_type;
+       uint32_t if_type, qmin;
        int length, i, rc = 0, rc2;
 
        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -8362,40 +8343,44 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                                phba->sli4_hba.max_cfg_param.max_rq);
 
                /*
-                * Calculate NVME queue resources based on how
-                * many WQ/CQs are available.
+                * Calculate queue resources based on how
+                * many WQ/CQ/EQs are available.
                 */
-               if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-                       length = phba->sli4_hba.max_cfg_param.max_wq;
-                       if (phba->sli4_hba.max_cfg_param.max_cq <
-                           phba->sli4_hba.max_cfg_param.max_wq)
-                               length = phba->sli4_hba.max_cfg_param.max_cq;
+               qmin = phba->sli4_hba.max_cfg_param.max_wq;
+               if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
+                       qmin = phba->sli4_hba.max_cfg_param.max_cq;
+               if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
+                       qmin = phba->sli4_hba.max_cfg_param.max_eq;
+               /*
+                * Whats left after this can go toward NVME / FCP.
+                * The minus 4 accounts for ELS, NVME LS, MBOX
+                * plus one extra. When configured for
+                * NVMET, FCP io channel WQs are not created.
+                */
+               qmin -= 4;
 
-                       /*
-                        * Whats left after this can go toward NVME.
-                        * The minus 6 accounts for ELS, NVME LS, MBOX
-                        * plus a couple extra. When configured for
-                        * NVMET, FCP io channel WQs are not created.
-                        */
-                       length -= 6;
-
-                       /* Take off FCP queues */
-                       if (!phba->nvmet_support)
-                               length -= phba->cfg_hdw_queue;
-
-                       /* Check to see if there is enough for NVME */
-                       if (phba->cfg_hdw_queue > length) {
-                               lpfc_printf_log(
-                                       phba, KERN_ERR, LOG_SLI,
-                                       "2005 Reducing NVME IO channel to %d: "
-                                       "WQ %d CQ %d CommonIO %d\n",
-                                       length,
+               /* If NVME is configured, double the number of CQ/WQs needed */
+               if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+                   !phba->nvmet_support)
+                       qmin /= 2;
+
+               /* Check to see if there is enough for NVME */
+               if ((phba->cfg_irq_chann > qmin) ||
+                   (phba->cfg_hdw_queue > qmin)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "2005 Reducing Queues: "
+                                       "WQ %d CQ %d EQ %d: min %d: "
+                                       "IRQ %d HDWQ %d\n",
                                        phba->sli4_hba.max_cfg_param.max_wq,
                                        phba->sli4_hba.max_cfg_param.max_cq,
+                                       phba->sli4_hba.max_cfg_param.max_eq,
+                                       qmin, phba->cfg_irq_chann,
                                        phba->cfg_hdw_queue);
 
-                               phba->cfg_hdw_queue = length;
-                       }
+                       if (phba->cfg_irq_chann > qmin)
+                               phba->cfg_irq_chann = qmin;
+                       if (phba->cfg_hdw_queue > qmin)
+                               phba->cfg_hdw_queue = qmin;
                }
        }
 
@@ -8612,25 +8597,17 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
         * device parameters
         */
 
-       if (phba->cfg_hdw_queue > phba->sli4_hba.max_cfg_param.max_eq) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "2575 Reducing IO channels to match number of "
-                               "available EQs: from %d to %d\n",
-                               phba->cfg_hdw_queue,
-                               phba->sli4_hba.max_cfg_param.max_eq);
-               phba->cfg_hdw_queue = phba->sli4_hba.max_cfg_param.max_eq;
-       }
-
        if (phba->nvmet_support) {
-               if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
-                       phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
+               if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
+                       phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
        }
        if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
                phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                       "2574 IO channels: hdwQ %d MRQ: %d\n",
-                       phba->cfg_hdw_queue, phba->cfg_nvmet_mrq);
+                       "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
+                       phba->cfg_hdw_queue, phba->cfg_irq_chann,
+                       phba->cfg_nvmet_mrq);
 
        /* Get EQ depth from module parameter, fake the default for now */
        phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -8658,6 +8635,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
        }
        qdesc->qe_valid = 1;
        qdesc->hdwq = wqidx;
+       qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
        phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
 
        qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
@@ -8669,6 +8647,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
                return 1;
        }
        qdesc->hdwq = wqidx;
+       qdesc->chann = wqidx;
        phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
        return 0;
@@ -8698,6 +8677,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
        }
        qdesc->qe_valid = 1;
        qdesc->hdwq = wqidx;
+       qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
        phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
 
        /* Create Fast Path FCP WQs */
@@ -8720,6 +8700,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
                return 1;
        }
        qdesc->hdwq = wqidx;
+       qdesc->chann = wqidx;
        phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
        return 0;
@@ -8743,8 +8724,9 @@ int
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
        struct lpfc_queue *qdesc;
-       int idx;
+       int idx, eqidx;
        struct lpfc_sli4_hdw_queue *qp;
+       struct lpfc_eq_intr_info *eqi;
 
        /*
         * Create HBA Record arrays.
@@ -8829,7 +8811,18 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
        /* Create HBA Event Queues (EQs) */
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
-               /* Create EQs */
+               /*
+                * If there are more Hardware Queues than available
+                * CQs, multiple Hardware Queues may share a common EQ.
+                */
+               if (idx >= phba->cfg_irq_chann) {
+                       /* Share an existing EQ */
+                       eqidx = lpfc_find_eq_handle(phba, idx);
+                       phba->sli4_hba.hdwq[idx].hba_eq =
+                               phba->sli4_hba.hdwq[eqidx].hba_eq;
+                       continue;
+               }
+               /* Create an EQ */
                qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                              phba->sli4_hba.eq_esize,
                                              phba->sli4_hba.eq_ecount);
@@ -8840,20 +8833,30 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                }
                qdesc->qe_valid = 1;
                qdesc->hdwq = idx;
+
+               /* Save the CPU this EQ is affinitised to */
+               eqidx = lpfc_find_eq_handle(phba, idx);
+               qdesc->chann = lpfc_find_cpu_handle(phba, eqidx,
+                                                   LPFC_FIND_BY_EQ);
                phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
+               qdesc->last_cpu = qdesc->chann;
+               eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
+               list_add(&qdesc->cpu_list, &eqi->list);
        }
 
 
        /* Allocate SCSI SLI4 CQ/WQs */
-       for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                if (lpfc_alloc_fcp_wq_cq(phba, idx))
                        goto out_error;
+       }
 
        /* Allocate NVME SLI4 CQ/WQs */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-               for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
+               for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                        if (lpfc_alloc_nvme_wq_cq(phba, idx))
                                goto out_error;
+               }
 
                if (phba->nvmet_support) {
                        for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
@@ -8871,6 +8874,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                                }
                                qdesc->qe_valid = 1;
                                qdesc->hdwq = idx;
+                               qdesc->chann = idx;
                                phba->sli4_hba.nvmet_cqset[idx] = qdesc;
                        }
                }
@@ -8902,6 +8906,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                goto out_error;
        }
        qdesc->qe_valid = 1;
+       qdesc->chann = 0;
        phba->sli4_hba.els_cq = qdesc;
 
 
@@ -8919,6 +8924,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                                "0505 Failed allocate slow-path MQ\n");
                goto out_error;
        }
+       qdesc->chann = 0;
        phba->sli4_hba.mbx_wq = qdesc;
 
        /*
@@ -8934,6 +8940,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                                "0504 Failed allocate slow-path ELS WQ\n");
                goto out_error;
        }
+       qdesc->chann = 0;
        phba->sli4_hba.els_wq = qdesc;
        list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
 
@@ -8947,6 +8954,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                                        "6079 Failed allocate NVME LS CQ\n");
                        goto out_error;
                }
+               qdesc->chann = 0;
                qdesc->qe_valid = 1;
                phba->sli4_hba.nvmels_cq = qdesc;
 
@@ -8959,6 +8967,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                                        "6080 Failed allocate NVME LS WQ\n");
                        goto out_error;
                }
+               qdesc->chann = 0;
                phba->sli4_hba.nvmels_wq = qdesc;
                list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
        }
@@ -9085,17 +9094,21 @@ lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
 }
 
 static inline void
-lpfc_sli4_release_hdwq(struct lpfc_sli4_hdw_queue *hdwq, int max)
+lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
 {
+       struct lpfc_sli4_hdw_queue *hdwq;
        uint32_t idx;
 
-       for (idx = 0; idx < max; idx++) {
-               lpfc_sli4_queue_free(hdwq[idx].hba_eq);
+       hdwq = phba->sli4_hba.hdwq;
+       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+               if (idx < phba->cfg_irq_chann)
+                       lpfc_sli4_queue_free(hdwq[idx].hba_eq);
+               hdwq[idx].hba_eq = NULL;
+
                lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
                lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
                lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
                lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
-               hdwq[idx].hba_eq = NULL;
                hdwq[idx].fcp_cq = NULL;
                hdwq[idx].nvme_cq = NULL;
                hdwq[idx].fcp_wq = NULL;
@@ -9120,8 +9133,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 {
        /* Release HBA eqs */
        if (phba->sli4_hba.hdwq)
-               lpfc_sli4_release_hdwq(phba->sli4_hba.hdwq,
-                                      phba->cfg_hdw_queue);
+               lpfc_sli4_release_hdwq(phba);
 
        if (phba->nvmet_support) {
                lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
@@ -9202,7 +9214,6 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        qidx, (uint32_t)rc);
                return rc;
        }
-       cq->chann = qidx;
 
        if (qtype != LPFC_MBOX) {
                /* Setup cq_map for fast lookup */
@@ -9222,7 +9233,6 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        /* no need to tear down cq - caller will do so */
                        return rc;
                }
-               wq->chann = qidx;
 
                /* Bind this CQ/WQ to the NVME ring */
                pring = wq->pring;
@@ -9251,6 +9261,38 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
        return 0;
 }
 
+/**
+ * lpfc_setup_cq_lookup - Setup the CQ lookup table
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will populate the cq_lookup table by all
+ * available CQ queue_id's.
+ **/
+void
+lpfc_setup_cq_lookup(struct lpfc_hba *phba)
+{
+       struct lpfc_queue *eq, *childq;
+       struct lpfc_sli4_hdw_queue *qp;
+       int qidx;
+
+       qp = phba->sli4_hba.hdwq;
+       memset(phba->sli4_hba.cq_lookup, 0,
+              (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
+       for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+               eq = qp[qidx].hba_eq;
+               if (!eq)
+                       continue;
+               list_for_each_entry(childq, &eq->child_list, list) {
+                       if (childq->queue_id > phba->sli4_hba.cq_max)
+                               continue;
+                       if ((childq->subtype == LPFC_FCP) ||
+                           (childq->subtype == LPFC_NVME))
+                               phba->sli4_hba.cq_lookup[childq->queue_id] =
+                                       childq;
+               }
+       }
+}
+
 /**
  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
  * @phba: pointer to lpfc hba data structure.
@@ -9271,7 +9313,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
        struct lpfc_sli4_hdw_queue *qp;
        LPFC_MBOXQ_t *mboxq;
        int qidx;
-       uint32_t length;
+       uint32_t length, usdelay;
        int rc = -ENOMEM;
 
        /* Check for dual-ULP support */
@@ -9331,7 +9373,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                rc = -ENOMEM;
                goto out_error;
        }
-       for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+       for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
                if (!qp[qidx].hba_eq) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0522 Fast-path EQ (%d) not "
@@ -9578,11 +9620,29 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                        phba->sli4_hba.dat_rq->queue_id,
                        phba->sli4_hba.els_cq->queue_id);
 
-       for (qidx = 0; qidx < phba->cfg_hdw_queue;
+       if (phba->cfg_fcp_imax)
+               usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
+       else
+               usdelay = 0;
+
+       for (qidx = 0; qidx < phba->cfg_irq_chann;
             qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
                lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
-                                        phba->cfg_fcp_imax);
+                                        usdelay);
 
+       if (phba->sli4_hba.cq_max) {
+               kfree(phba->sli4_hba.cq_lookup);
+               phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
+                       sizeof(struct lpfc_queue *), GFP_KERNEL);
+               if (!phba->sli4_hba.cq_lookup) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0549 Failed setup of CQ Lookup table: "
+                                       "size 0x%x\n", phba->sli4_hba.cq_max);
+                       rc = -ENOMEM;
+                       goto out_destroy;
+               }
+               lpfc_setup_cq_lookup(phba);
+       }
        return 0;
 
 out_destroy:
@@ -9664,9 +9724,14 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
                        lpfc_wq_destroy(phba, qp->nvme_wq);
                        lpfc_cq_destroy(phba, qp->fcp_cq);
                        lpfc_cq_destroy(phba, qp->nvme_cq);
-                       lpfc_eq_destroy(phba, qp->hba_eq);
+                       if (qidx < phba->cfg_irq_chann)
+                               lpfc_eq_destroy(phba, qp->hba_eq);
                }
        }
+
+       kfree(phba->sli4_hba.cq_lookup);
+       phba->sli4_hba.cq_lookup = NULL;
+       phba->sli4_hba.cq_max = 0;
 }
 
 /**
@@ -9984,7 +10049,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 {
        struct pci_dev *pdev = phba->pcidev;
        unsigned long bar0map_len, bar1map_len, bar2map_len;
-       int error = -ENODEV;
+       int error;
        uint32_t if_type;
 
        if (!pdev)
@@ -10001,7 +10066,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
         */
        if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
                                  &phba->sli4_hba.sli_intf.word0)) {
-               return error;
+               return -ENODEV;
        }
 
        /* There is no SLI3 failback for SLI4 devices. */
@@ -10011,7 +10076,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                                "2894 SLI_INTF reg contents invalid "
                                "sli_intf reg 0x%x\n",
                                phba->sli4_hba.sli_intf.word0);
-               return error;
+               return -ENODEV;
        }
 
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
@@ -10035,7 +10100,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "ioremap failed for SLI4 PCI config "
                                   "registers.\n");
-                       goto out;
+                       return -ENODEV;
                }
                phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
                /* Set up BAR0 PCI config space register memory map */
@@ -10046,7 +10111,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
                        dev_printk(KERN_ERR, &pdev->dev,
                           "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
-                       goto out;
+                       return -ENODEV;
                }
                phba->sli4_hba.conf_regs_memmap_p =
                                ioremap(phba->pci_bar0_map, bar0map_len);
@@ -10054,7 +10119,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                        dev_printk(KERN_ERR, &pdev->dev,
                                "ioremap failed for SLI4 PCI config "
                                "registers.\n");
-                               goto out;
+                       return -ENODEV;
                }
                lpfc_sli4_bar0_register_memmap(phba, if_type);
        }
@@ -10100,6 +10165,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                if (!phba->sli4_hba.drbl_regs_memmap_p) {
                        dev_err(&pdev->dev,
                           "ioremap failed for SLI4 HBA doorbell registers.\n");
+                       error = -ENOMEM;
                        goto out_iounmap_conf;
                }
                phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
@@ -10149,6 +10215,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                if (!phba->sli4_hba.dpp_regs_memmap_p) {
                        dev_err(&pdev->dev,
                           "ioremap failed for SLI4 HBA dpp registers.\n");
+                       error = -ENOMEM;
                        goto out_iounmap_ctrl;
                }
                phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
@@ -10159,13 +10226,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
        case LPFC_SLI_INTF_IF_TYPE_0:
        case LPFC_SLI_INTF_IF_TYPE_2:
                phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
-               phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
-               phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
+               phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
+               phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
                break;
        case LPFC_SLI_INTF_IF_TYPE_6:
                phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
-               phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
-               phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
+               phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
+               phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
                break;
        default:
                break;
@@ -10179,7 +10246,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
        iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
 out_iounmap_conf:
        iounmap(phba->sli4_hba.conf_regs_memmap_p);
-out:
+
        return error;
 }
 
@@ -10445,23 +10512,98 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
        phba->sli.slistat.sli_intr = 0;
 }
 
+/**
+ * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified EQ
+ * @phba: pointer to lpfc hba data structure.
+ * @id: EQ vector index or Hardware Queue index
+ * @match: LPFC_FIND_BY_EQ = match by EQ
+ *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
+ */
+static uint16_t
+lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
+{
+       struct lpfc_vector_map_info *cpup;
+       int cpu;
+
+       /* Find the desired phys_id for the specified EQ */
+       for_each_present_cpu(cpu) {
+               cpup = &phba->sli4_hba.cpu_map[cpu];
+               if ((match == LPFC_FIND_BY_EQ) &&
+                   (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+                   (cpup->eq == id))
+                       return cpu;
+               if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
+                       return cpu;
+       }
+       return 0;
+}
+
+/**
+ * lpfc_find_eq_handle - Find the EQ that corresponds to the specified
+ *                       Hardware Queue
+ * @phba: pointer to lpfc hba data structure.
+ * @hdwq: Hardware Queue index
+ */
+static uint16_t
+lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq)
+{
+       struct lpfc_vector_map_info *cpup;
+       int cpu;
+
+       /* Find the desired phys_id for the specified EQ */
+       for_each_present_cpu(cpu) {
+               cpup = &phba->sli4_hba.cpu_map[cpu];
+               if (cpup->hdwq == hdwq)
+                       return cpup->eq;
+       }
+       return 0;
+}
+
+#ifdef CONFIG_X86
+/**
+ * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: CPU map index
+ * @phys_id: CPU package physical id
+ * @core_id: CPU core id
+ */
+static int
+lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
+               uint16_t phys_id, uint16_t core_id)
+{
+       struct lpfc_vector_map_info *cpup;
+       int idx;
+
+       for_each_present_cpu(idx) {
+               cpup = &phba->sli4_hba.cpu_map[idx];
+               /* Does the cpup match the one we are looking for */
+               if ((cpup->phys_id == phys_id) &&
+                   (cpup->core_id == core_id) &&
+                   (cpu != idx))
+                       return 1;
+       }
+       return 0;
+}
+#endif
+
 /**
  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
  * @phba: pointer to lpfc hba data structure.
+ * @vectors: number of msix vectors allocated.
  *
  * The routine will figure out the CPU affinity assignment for every
- * MSI-X vector allocated for the HBA.  The hba_eq_hdl will be updated
- * with a pointer to the CPU mask that defines ALL the CPUs this vector
- * can be associated with. If the vector can be unquely associated with
- * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
+ * MSI-X vector allocated for the HBA.
  * In addition, the CPU to IO channel mapping will be calculated
  * and the phba->sli4_hba.cpu_map array will reflect this.
  */
 static void
-lpfc_cpu_affinity_check(struct lpfc_hba *phba)
+lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 {
+       int i, cpu, idx;
+       int max_phys_id, min_phys_id;
+       int max_core_id, min_core_id;
        struct lpfc_vector_map_info *cpup;
-       int cpu, idx;
+       const struct cpumask *maskp;
 #ifdef CONFIG_X86
        struct cpuinfo_x86 *cpuinfo;
 #endif
@@ -10469,43 +10611,72 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba)
        /* Init cpu_map array */
        memset(phba->sli4_hba.cpu_map, 0xff,
               (sizeof(struct lpfc_vector_map_info) *
-              phba->sli4_hba.num_present_cpu));
+              phba->sli4_hba.num_possible_cpu));
+
+       max_phys_id = 0;
+       min_phys_id = 0xffff;
+       max_core_id = 0;
+       min_core_id = 0xffff;
 
        /* Update CPU map with physical id and core id of each CPU */
-       cpup = phba->sli4_hba.cpu_map;
-       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+       for_each_present_cpu(cpu) {
+               cpup = &phba->sli4_hba.cpu_map[cpu];
 #ifdef CONFIG_X86
                cpuinfo = &cpu_data(cpu);
                cpup->phys_id = cpuinfo->phys_proc_id;
                cpup->core_id = cpuinfo->cpu_core_id;
+               cpup->hyper = lpfc_find_hyper(phba, cpu,
+                                             cpup->phys_id, cpup->core_id);
 #else
                /* No distinction between CPUs for other platforms */
                cpup->phys_id = 0;
-               cpup->core_id = 0;
+               cpup->core_id = cpu;
+               cpup->hyper = 0;
 #endif
+
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "3328 CPU physid %d coreid %d\n",
                                cpup->phys_id, cpup->core_id);
-               cpup++;
+
+               if (cpup->phys_id > max_phys_id)
+                       max_phys_id = cpup->phys_id;
+               if (cpup->phys_id < min_phys_id)
+                       min_phys_id = cpup->phys_id;
+
+               if (cpup->core_id > max_core_id)
+                       max_core_id = cpup->core_id;
+               if (cpup->core_id < min_core_id)
+                       min_core_id = cpup->core_id;
        }
 
-       for (idx = 0; idx <  phba->cfg_hdw_queue; idx++) {
-               cpup = &phba->sli4_hba.cpu_map[idx];
-               cpup->irq = pci_irq_vector(phba->pcidev, idx);
+       for_each_possible_cpu(i) {
+               struct lpfc_eq_intr_info *eqi =
+                       per_cpu_ptr(phba->sli4_hba.eq_info, i);
 
-               /* For now assume vector N maps to CPU N */
-               irq_set_affinity_hint(cpup->irq, get_cpu_mask(idx));
-               cpup->hdwq = idx;
+               INIT_LIST_HEAD(&eqi->list);
+               eqi->icnt = 0;
+       }
 
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "3336 Set Affinity: CPU %d "
-                       "hdwq %d irq %d\n",
-                       cpu, cpup->hdwq, cpup->irq);
+       for (idx = 0; idx <  phba->cfg_irq_chann; idx++) {
+               maskp = pci_irq_get_affinity(phba->pcidev, idx);
+               if (!maskp)
+                       continue;
+
+               for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+                       cpup = &phba->sli4_hba.cpu_map[cpu];
+                       cpup->eq = idx;
+                       cpup->hdwq = idx;
+                       cpup->irq = pci_irq_vector(phba->pcidev, idx);
+
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3336 Set Affinity: CPU %d "
+                                       "hdwq %d irq %d\n",
+                                       cpu, cpup->hdwq, cpup->irq);
+               }
        }
        return;
 }
 
-
 /**
  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
  * @phba: pointer to lpfc hba data structure.
@@ -10524,10 +10695,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
        char *name;
 
        /* Set up MSI-X multi-message vectors */
-       vectors = phba->cfg_hdw_queue;
+       vectors = phba->cfg_irq_chann;
 
        rc = pci_alloc_irq_vectors(phba->pcidev,
-                               (phba->nvmet_support) ? 1 : 2,
+                               1,
                                vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (rc < 0) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -10545,7 +10716,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 
                phba->sli4_hba.hba_eq_hdl[index].idx = index;
                phba->sli4_hba.hba_eq_hdl[index].phba = phba;
-               atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
                rc = request_irq(pci_irq_vector(phba->pcidev, index),
                         &lpfc_sli4_hba_intr_handler, 0,
                         name,
@@ -10558,17 +10728,16 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
                }
        }
 
-       if (vectors != phba->cfg_hdw_queue) {
+       if (vectors != phba->cfg_irq_chann) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3238 Reducing IO channels to match number of "
                                "MSI-X vectors, requested %d got %d\n",
-                               phba->cfg_hdw_queue, vectors);
-               if (phba->cfg_hdw_queue > vectors)
-                       phba->cfg_hdw_queue = vectors;
+                               phba->cfg_irq_chann, vectors);
+               if (phba->cfg_irq_chann > vectors)
+                       phba->cfg_irq_chann = vectors;
                if (phba->cfg_nvmet_mrq > vectors)
                        phba->cfg_nvmet_mrq = vectors;
        }
-       lpfc_cpu_affinity_check(phba);
 
        return rc;
 
@@ -10623,7 +10792,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
                return rc;
        }
 
-       for (index = 0; index < phba->cfg_hdw_queue; index++) {
+       for (index = 0; index < phba->cfg_irq_chann; index++) {
                phba->sli4_hba.hba_eq_hdl[index].idx = index;
                phba->sli4_hba.hba_eq_hdl[index].phba = phba;
        }
@@ -10688,11 +10857,10 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
                        phba->intr_type = INTx;
                        intr_mode = 0;
 
-                       for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+                       for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
                                eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
                                eqhdl->idx = idx;
                                eqhdl->phba = phba;
-                               atomic_set(&eqhdl->hba_eq_in_use, 1);
                        }
                }
        }
@@ -10716,7 +10884,7 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
                int index;
 
                /* Free up MSI-X multi-message vectors */
-               for (index = 0; index < phba->cfg_hdw_queue; index++) {
+               for (index = 0; index < phba->cfg_irq_chann; index++) {
                        irq_set_affinity_hint(
                                pci_irq_vector(phba->pcidev, index),
                                NULL);
@@ -10966,7 +11134,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
                lpfc_sli4_ras_dma_free(phba);
 
        /* Stop the SLI4 device port */
-       phba->pport->work_port_events = 0;
+       if (phba->pport)
+               phba->pport->work_port_events = 0;
 }
 
  /**
@@ -12092,12 +12261,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
        }
        /* Default to single EQ for non-MSI-X */
        if (phba->intr_type != MSIX) {
-               phba->cfg_hdw_queue = 1;
+               phba->cfg_irq_chann = 1;
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                        if (phba->nvmet_support)
                                phba->cfg_nvmet_mrq = 1;
                }
        }
+       lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
 
        /* Create SCSI host to the physical port */
        error = lpfc_create_shost(phba);