/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/bitops.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
        lpfc_vpd_t *vp = &phba->vpd;
        int i = 0, rc;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        char *lpfc_vpd_data = NULL;
        uint16_t offset = 0;
        static char licensed[56] =
                    "key unlock for use with gnu public licensed code only\0";
        static int init_key = 1;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;

        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
                if (init_key) {
                        uint32_t *ptext = (uint32_t *) licensed;

                        for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
                                *ptext = cpu_to_be32(*ptext);
                        init_key = 0;
                }

                lpfc_read_nv(phba, pmb);
                memset((char *)mb->un.varRDnvp.rsvd3, 0,
                       sizeof(mb->un.varRDnvp.rsvd3));
                memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
                       sizeof(licensed));

                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0324 Config Port initialization "
                                        "error, mbxCmd x%x READ_NVPARM, "
                                        "mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -ERESTART;
                }
                memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
                       sizeof(phba->wwnn));
                memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
                       sizeof(phba->wwpn));
        }

        /*
         * Clear all option bits except LPFC_SLI3_BG_ENABLED,
         * which was already set in lpfc_get_cfgparam()
         */
        phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0439 Adapter failed to init, mbxCmd x%x "
                                "READ_REV, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        /*
         * The value of rr must be 1 since the driver set the cv field to 1.
         * This setting requires the FW to set all revision fields.
         */
        if (mb->un.varRdRev.rr == 0) {
                vp->rev.rBit = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0440 Adapter failed to init, READ_REV has "
                                "missing revision information.\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
        }

        /* Save information as VPD data */
        vp->rev.rBit = 1;
        memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
        vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
        memcpy(vp->rev.sli1FwName, (char *)mb->un.varRdRev.sli1FwName, 16);
        vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
        memcpy(vp->rev.sli2FwName, (char *)mb->un.varRdRev.sli2FwName, 16);
        vp->rev.biuRev = mb->un.varRdRev.biuRev;
        vp->rev.smRev = mb->un.varRdRev.smRev;
        vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
        vp->rev.endecRev = mb->un.varRdRev.endecRev;
        vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
        vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
        vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
        vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
        vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
        vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

        /* If the sli feature level is less than 9, we must
         * tear down all RPIs and VPIs on link down if NPIV
         * is enabled.
         */
        if (vp->rev.feaLevelHigh < 9)
                phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

        if (lpfc_is_LC_HBA(phba->pcidev->device))
                memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
                       sizeof(phba->RandomData));

        /* Get adapter VPD information */
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;
        do {
                lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0441 VPD not present on adapter, "
                                        "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                /* dump mem may return a zero when finished or we got a
                 * mailbox error, either way we are done.
                 */
                if (mb->un.varDmp.word_cnt == 0)
                        break;
                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                      lpfc_vpd_data + offset,
                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
        lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

        kfree(lpfc_vpd_data);
out_free_mbox:
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}
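
/*
 * Illustrative sketch (editorial note, not driver code): the polled
 * mailbox pattern used throughout this file.  The caller owns the
 * LPFC_MBOXQ_t for MBX_POLL and must free it on every exit path:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);			// build the command
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
 *		// inspect pmb->u.mb.mbxStatus, log, and bail out
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *		return -ERESTART;
 *	}
 *	// consume pmb->u.mb response fields, then free or reuse pmb
 */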

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, the internal async event support flag is
 * set to 1; otherwise it is cleared to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
        mempool_free(pmboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        struct prog_id *prg;
        uint32_t prog_id_word;
        char dist = ' ';
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";

        if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }

        prg = (struct prog_id *) &prog_id_word;

        /* word 7 contains the option rom version */
        prog_id_word = pmboxq->u.mb.un.varWords[7];

        /* Decode the Option rom version word to a readable string */
        if (prg->dist < 4)
                dist = dist_char[prg->dist];

        if ((prg->dist == 3) && (prg->num == 0))
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
                         prg->ver, prg->rev, prg->lev);
        else
                snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
                         prg->ver, prg->rev, prg->lev,
                         dist, prg->num);
        mempool_free(pmboxq, phba->mbox_mem_pool);
}
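
/*
 * Worked example (editorial note, not driver code): assuming a wakeup
 * parameter word that decodes to ver=10, rev=2, lev=7, dist=3 and num=5,
 * dist indexes "nabx" to yield 'x' and the routine above formats
 * "%d.%d%d%c%d" into "10.27x5".  With the same fields but num=0, the
 * dist/num suffix is suppressed and the string is just "10.27".
 */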

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
        uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
        u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

        /* If the soft name exists then update it using the service params */
        if (vport->phba->cfg_soft_wwnn)
                u64_to_wwn(vport->phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (vport->phba->cfg_soft_wwpn)
                u64_to_wwn(vport->phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);

        /*
         * If the name is empty or there exists a soft name
         * then copy the service params name, otherwise use the fc name
         */
        if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
                memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
                       sizeof(struct lpfc_name));
        else
                memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
                       sizeof(struct lpfc_name));

        /*
         * If the port name has changed, then set the Param changes flag
         * to unreg the login
         */
        if (vport->fc_portname.u.wwn[0] != 0 &&
            memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
                   sizeof(struct lpfc_name)))
                vport->vport_flag |= FAWWPN_PARAM_CHG;

        if (vport->fc_portname.u.wwn[0] == 0 ||
            vport->phba->cfg_soft_wwpn ||
            (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
            vport->vport_flag & FAWWPN_SET) {
                memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
                       sizeof(struct lpfc_name));
                vport->vport_flag &= ~FAWWPN_SET;
                if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
                        vport->vport_flag |= FAWWPN_SET;
        } else {
                memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
                       sizeof(struct lpfc_name));
        }
}
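
/*
 * Illustrative sketch (editorial note, not driver code): u64_to_wwn()
 * from scsi_transport_fc.h expands a 64-bit name MSB-first, so a soft
 * WWNN of 0x20000000c9abcdef (a hypothetical value) lands in
 * fc_sparam.nodeName.u.wwn[0..7] as:
 *
 *	20 00 00 00 c9 ab cd ef
 *
 * which the memcpy() calls above then propagate into vport->fc_nodename.
 */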

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, timeout;
        int i, j;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /*
         * If the Config port completed correctly the HBA is not
         * over heated any more.
         */
        if (phba->over_temp_state == HBA_OVER_TEMP)
                phba->over_temp_state = HBA_NORMAL_TEMP;
        spin_unlock_irq(&phba->hbalock);

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;

        /* Get login parameters for NID.  */
        rc = lpfc_read_sparam(phba, pmb, 0);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0448 Adapter failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *) pmb->context1;
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *) pmb->context1;

        memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->context1 = NULL;
        lpfc_update_vport_wwn(vport);

        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
        fc_host_max_npiv_vports(shost) = phba->max_vpi;

        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
        if (phba->SerialNumber[0] == 0) {
                uint8_t *outptr;

                outptr = &vport->fc_nodename.u.s.IEEE[0];
                for (i = 0; i < 12; i++) {
                        status = *outptr++;
                        j = ((status & 0xf0) >> 4);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                        i++;
                        j = (status & 0xf);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                }
        }
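
        /*
         * Editorial note, not driver code: the loop above is effectively a
         * lowercase-hex dump of the 6 IEEE bytes of the WWNN.  For a
         * hypothetical WWNN whose IEEE portion is 00 90 fa 12 34 56, the
         * derived SerialNumber would be the 12 characters "0090fa123456".
         */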

        lpfc_read_config(phba, pmb);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0453 Adapter failed to init, mbxCmd x%x "
                                "READ_CONFIG, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        /* Check if the port is disabled */
        lpfc_sli_read_link_ste(phba);

        /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
        i = (mb->un.varRdConfig.max_xri + 1);
        if (phba->cfg_hba_queue_depth > i) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3359 HBA queue depth changed from %d to %d\n",
                                phba->cfg_hba_queue_depth, i);
                phba->cfg_hba_queue_depth = i;
        }

        /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
        i = (mb->un.varRdConfig.max_xri >> 3);
        if (phba->pport->cfg_lun_queue_depth > i) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                "3360 LUN queue depth changed from %d to %d\n",
                                phba->pport->cfg_lun_queue_depth, i);
                phba->pport->cfg_lun_queue_depth = i;
        }

        phba->lmt = mb->un.varRdConfig.lmt;

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        phba->link_state = LPFC_LINK_DOWN;

        /* Only process IOCBs on ELS ring till hba_state is READY */
        if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
                psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

        /* Post receive buffers for desired rings */
        if (phba->sli_rev != 3)
                lpfc_post_rcv_buf(phba);

        /*
         * Configure HBA MSI-X attention conditions to messages if MSI-X mode
         */
        if (phba->intr_type == MSIX) {
                rc = lpfc_config_msi(phba, pmb);
                if (rc) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
                                        pmb->u.mb.mbxCommand,
                                        pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;

        /* Enable appropriate host interrupts */
        if (lpfc_readl(phba->HCregaddr, &status)) {
                spin_unlock_irq(&phba->hbalock);
                return -EIO;
        }
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
                status |= HC_R0INT_ENA;
        if (psli->num_rings > 1)
                status |= HC_R1INT_ENA;
        if (psli->num_rings > 2)
                status |= HC_R2INT_ENA;
        if (psli->num_rings > 3)
                status |= HC_R3INT_ENA;

        if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
            (phba->cfg_poll & DISABLE_FCP_RING_INT))
                status &= ~(HC_R0INT_ENA);

        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
        mod_timer(&vport->els_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll,
                  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2598 Adapter Link is disabled.\n");
                lpfc_down_link(phba, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "2599 Adapter failed to issue DOWN_LINK"
                                        " mbox command rc 0x%x\n", rc);

                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
                mempool_free(pmb, phba->mbox_mem_pool);
                rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
                if (rc)
                        return rc;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0456 Adapter failed to issue "
                                "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        /* Get Option rom version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
                                "to get Option ROM version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return 0;
}
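
/*
 * Editorial note, not driver code: the timer arithmetic above converts
 * whole seconds to jiffies.  For example, with a hypothetical fc_ratov
 * of 10s the ELS ring timer fires 20s out:
 *
 *	timeout = phba->fc_ratov * 2;			// 20 seconds
 *	mod_timer(&vport->els_tmofunc,
 *		  jiffies + msecs_to_jiffies(1000 * timeout));
 */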

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
        return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
                               uint32_t flag)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->u.mb;
        pmb->vport = vport;

        if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
             !(phba->lmt & LMT_1Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
             !(phba->lmt & LMT_2Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
             !(phba->lmt & LMT_4Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
             !(phba->lmt & LMT_8Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
             !(phba->lmt & LMT_10Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
             !(phba->lmt & LMT_16Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
             !(phba->lmt & LMT_32Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
             !(phba->lmt & LMT_64Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1302 Invalid speed for this board:%d "
                                "Reset link speed to auto.\n",
                                phba->cfg_link_speed);
                phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        }
        lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        if (phba->sli_rev < LPFC_SLI_REV4)
                lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0498 Adapter failed to init, mbxCmd x%x "
                                "INIT_LINK, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                if (phba->sli_rev <= LPFC_SLI_REV3) {
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                        /* Clear all pending interrupts */
                        writel(0xffffffff, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
                }
                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}
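
/*
 * Editorial note, not driver code: the speed check above cross-validates
 * the user's lpfc_link_speed module parameter against the link-speed mask
 * (lmt) reported by READ_CONFIG.  For example, requesting
 * LPFC_USER_LINK_SPEED_16G on a board whose lmt lacks LMT_16Gb logs
 * message 1302 and falls back to LPFC_USER_LINK_SPEED_AUTO.
 */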

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0491 Adapter Link is disabled.\n");
        lpfc_down_link(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
        if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2522 Adapter failed to issue DOWN_LINK"
                                " mbox command rc 0x%x\n", rc);

                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        if (flag == MBX_POLL)
                mempool_free(pmb, phba->mbox_mem_pool);

        return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        if (phba->sli_rev <= LPFC_SLI_REV3) {
                /* Disable interrupts */
                writel(0, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (phba->pport->load_flag & FC_UNLOADING)
                lpfc_cleanup_discovery_resources(phba->pport);
        else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for (i = 0; i <= phba->max_vports &&
                                vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
        return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up completed slow path events after the HBA is
 * reset when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *rspiocbq;
        struct hbq_dmabuf *dmabuf;
        struct lpfc_cq_event *cq_event;

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
                /* Get the response iocb from the head of work queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_queue_event,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);

                switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
                case CQE_CODE_COMPL_WQE:
                        rspiocbq = container_of(cq_event, struct lpfc_iocbq,
                                                cq_event);
                        lpfc_sli_release_iocbq(phba, rspiocbq);
                        break;
                case CQE_CODE_RECEIVE:
                case CQE_CODE_RECEIVE_V1:
                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                              cq_event);
                        lpfc_in_buf_free(phba, &dmabuf->dbuf);
                }
        }
}
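
/*
 * Illustrative sketch (editorial note, not driver code): the dequeue
 * pattern above takes hbalock only around each list_remove_head() so the
 * per-event teardown runs with the lock dropped:
 *
 *	while (!list_empty(&queue)) {
 *		spin_lock_irq(&lock);
 *		list_remove_head(&queue, item, struct item_type, list);
 *		spin_unlock_irq(&lock);
 *		release(item);		// no lock held here
 *	}
 *
 * struct item_type and release() are placeholders for the cq_event
 * handling shown above.
 */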

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *mp, *next_mp;
        LIST_HEAD(buflist);
        int count;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
                lpfc_sli_hbqbuf_free_all(phba);
        else {
                /* Cleanup preposted buffers on the ELS ring */
                pring = &psli->sli3_ring[LPFC_ELS_RING];
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&pring->postbufq, &buflist);
                spin_unlock_irq(&phba->hbalock);

                count = 0;
                list_for_each_entry_safe(mp, next_mp, &buflist, list) {
                        list_del(&mp->list);
                        count++;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }

                spin_lock_irq(&phba->hbalock);
                pring->postbufq_cnt -= count;
                spin_unlock_irq(&phba->hbalock);
        }
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will clean up the txcmplq after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_queue *qp = NULL;
        struct lpfc_sli_ring *pring;
        LIST_HEAD(completions);
        int i;
        struct lpfc_iocbq *piocb, *next_iocb;

        if (phba->sli_rev != LPFC_SLI_REV4) {
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->sli3_ring[i];
                        spin_lock_irq(&phba->hbalock);
                        /* At this point in time the HBA is either reset or DOA.
                         * Nothing should be on txcmplq as it will
                         * NEVER complete.
                         */
                        list_splice_init(&pring->txcmplq, &completions);
                        pring->txcmplq_cnt = 0;
                        spin_unlock_irq(&phba->hbalock);

                        lpfc_sli_abort_iocb_ring(phba, pring);
                }
                /* Cancel all the IOCBs from the completions list */
                lpfc_sli_cancel_iocbs(phba, &completions,
                                      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
                return;
        }
        list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
                pring = qp->pring;
                if (!pring)
                        continue;
                spin_lock_irq(&pring->ring_lock);
                list_for_each_entry_safe(piocb, next_iocb,
                                         &pring->txcmplq, list)
                        piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;
                spin_unlock_irq(&pring->ring_lock);
                lpfc_sli_abort_iocb_ring(phba, pring);
        }
        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions,
                              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
        lpfc_hba_free_post_buf(phba);
        lpfc_hba_clean_txcmplq(phba);
        return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
        struct lpfc_scsi_buf *psb, *psb_next;
        struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
        LIST_HEAD(aborts);
        LIST_HEAD(nvme_aborts);
        LIST_HEAD(nvmet_aborts);
        unsigned long iflag = 0;
        struct lpfc_sglq *sglq_entry = NULL;
        int cnt;

        lpfc_sli_hbqbuf_free_all(phba);
        lpfc_hba_clean_txcmplq(phba);

        /* At this point in time the HBA is either reset or DOA. Either
         * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
         * on the lpfc_els_sgl_list so that it can either be freed if the
         * driver is unloading or reposted if the driver is restarting
         * the port.
         */
        spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
                                        /* scsi_buf_list */
        /* sgl_list_lock required because worker thread uses this
         * list.
         */
        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_for_each_entry(sglq_entry,
                &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
                sglq_entry->state = SGL_FREED;

        list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
                         &phba->sli4_hba.lpfc_els_sgl_list);

        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        /* abts_scsi_buf_list_lock required because worker thread uses this
         * list.
         */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
                spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
                list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
                                 &aborts);
                spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
        }

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
                list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
                                 &nvme_aborts);
                list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
                                 &nvmet_aborts);
                spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
        }

        spin_unlock_irq(&phba->hbalock);

        list_for_each_entry_safe(psb, psb_next, &aborts, list) {
                psb->pCmd = NULL;
                psb->status = IOSTAT_SUCCESS;
        }
        spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
        list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
        spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                cnt = 0;
                list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
                        psb->pCmd = NULL;
                        psb->status = IOSTAT_SUCCESS;
                        cnt++;
                }
                spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
                phba->put_nvme_bufs += cnt;
                list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
                spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);

                list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
                        ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
                        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
                }
        }

        lpfc_sli4_free_sp_events(phba);
        return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
        return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        uint32_t tmo_posted;
        unsigned long iflag;

        phba = from_timer(phba, t, hb_tmofunc);

        /* Check for heart beat timeout conditions */
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        /* Tell the worker thread there is work to do */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
}
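
/*
 * Illustrative sketch (editorial note, not driver code): handlers like
 * lpfc_hb_timeout() are bound to their timer_list with timer_setup()
 * during driver resource setup, and from_timer() above recovers the
 * enclosing lpfc_hba from the timer_list pointer:
 *
 *	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
 *	mod_timer(&phba->hb_tmofunc,
 *		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 */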

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
        struct lpfc_hba *phba;
        unsigned long iflag;

        phba = from_timer(phba, t, rrq_tmr);
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        if (!(phba->pport->load_flag & FC_UNLOADING))
                phba->hba_flag |= HBA_RRQ_ACTIVE;
        else
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        if (!(phba->pport->load_flag & FC_UNLOADING))
                lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer
 * expires with the heart-beat outstanding state set, the driver will put
 * the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        unsigned long drvr_flag;

        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        phba->hb_outstanding = 0;
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

        /* Check and reset heart-beat timer if necessary */
        mempool_free(pmboxq, phba->mbox_mem_pool);
        if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
            !(phba->link_state == LPFC_HBA_ERROR) &&
            !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
                          jiffies +
                          msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
}
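
/*
 * Editorial note, not driver code: a healthy heart-beat cycle, using the
 * intervals named in the comment above (5s interval, 30s mailbox timeout):
 *
 *	t=0s	timer fires, no HB outstanding -> issue HB mailbox,
 *		set hb_outstanding, re-arm timer for LPFC_HB_MBOX_TIMEOUT
 *	t<30s	lpfc_hb_mbox_cmpl() runs, clears hb_outstanding,
 *		re-arms timer for LPFC_HB_MBOX_INTERVAL (5s)
 *	...	cycle repeats; if the timer instead expires while
 *		hb_outstanding is still set, the HBA is taken offline
 */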
1241
1242 /**
1243  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1244  * @phba: pointer to lpfc hba data structure.
1245  *
1246  * This is the actual HBA-timer timeout handler to be invoked by the worker
1247  * thread whenever the HBA timer fired and HBA-timeout event posted. This
1248  * handler performs any periodic operations needed for the device. If such
1249  * periodic event has already been attended to either in the interrupt handler
1250  * or by processing slow-ring or fast-ring events within the HBA-timer
1251  * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1252  * the timer for the next timeout period. If lpfc heart-beat mailbox command
1253  * is configured and there is no heart-beat mailbox command outstanding, a
1254  * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1255  * has been a heart-beat mailbox command outstanding, the HBA shall be put
1256  * to offline.
1257  **/
1258 void
1259 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1260 {
1261         struct lpfc_vport **vports;
1262         LPFC_MBOXQ_t *pmboxq;
1263         struct lpfc_dmabuf *buf_ptr;
1264         int retval, i;
1265         struct lpfc_sli *psli = &phba->sli;
1266         LIST_HEAD(completions);
1267         struct lpfc_queue *qp;
1268         unsigned long time_elapsed;
1269         uint32_t tick_cqe, max_cqe, val;
1270         uint64_t tot, data1, data2, data3;
1271         struct lpfc_nvmet_tgtport *tgtp;
1272         struct lpfc_register reg_data;
1273         struct nvme_fc_local_port *localport;
1274         struct lpfc_nvme_lport *lport;
1275         struct lpfc_nvme_ctrl_stat *cstat;
1276         void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
1277
1278         vports = lpfc_create_vport_work_array(phba);
1279         if (vports != NULL)
1280                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1281                         lpfc_rcv_seq_check_edtov(vports[i]);
1282                         lpfc_fdmi_num_disc_check(vports[i]);
1283                 }
1284         lpfc_destroy_vport_work_array(phba, vports);
1285
1286         if ((phba->link_state == LPFC_HBA_ERROR) ||
1287                 (phba->pport->load_flag & FC_UNLOADING) ||
1288                 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1289                 return;
1290
1291         if (phba->cfg_auto_imax) {
1292                 if (!phba->last_eqdelay_time) {
1293                         phba->last_eqdelay_time = jiffies;
1294                         goto skip_eqdelay;
1295                 }
1296                 time_elapsed = jiffies - phba->last_eqdelay_time;
1297                 phba->last_eqdelay_time = jiffies;
1298
1299                 tot = 0xffff;
1300                 /* Check outstanding IO count */
1301                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1302                         if (phba->nvmet_support) {
1303                                 tgtp = phba->targetport->private;
1304                                 /* Calculate outstanding IOs */
1305                                 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
1306                                 tot += atomic_read(&tgtp->xmt_fcp_release);
1307                                 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
1308                         } else {
1309                                 localport = phba->pport->localport;
1310                                 if (!localport || !localport->private)
1311                                         goto skip_eqdelay;
1312                                 lport = (struct lpfc_nvme_lport *)
1313                                         localport->private;
1314                                 tot = 0;
1315                                 for (i = 0;
1316                                         i < phba->cfg_nvme_io_channel; i++) {
1317                                         cstat = &lport->cstat[i];
1318                                         data1 = atomic_read(
1319                                                 &cstat->fc4NvmeInputRequests);
1320                                         data2 = atomic_read(
1321                                                 &cstat->fc4NvmeOutputRequests);
1322                                         data3 = atomic_read(
1323                                                 &cstat->fc4NvmeControlRequests);
1324                                         tot += (data1 + data2 + data3);
1325                                         tot -= atomic_read(
1326                                                 &cstat->fc4NvmeIoCmpls);
1327                                 }
1328                         }
1329                 }
1330
1331                 /* Interrupts per sec per EQ */
1332                 val = phba->cfg_fcp_imax / phba->io_channel_irqs;
1333                 tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
1334
1335                 /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
1336                 max_cqe = time_elapsed * tick_cqe;
1337
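                /*
                 * Illustrative numbers (not from the driver): with
                 * cfg_fcp_imax = 150000 interrupts/sec, io_channel_irqs = 4
                 * and CONFIG_HZ = 250, val = 37500 and tick_cqe = 150; an
                 * elapsed time of 250 ticks (1 sec) would then allow
                 * max_cqe = 37500 CQEs per EQ.
                 */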
1338                 for (i = 0; i < phba->io_channel_irqs; i++) {
1339                         /* Fast-path EQ */
1340                         qp = phba->sli4_hba.hba_eq[i];
1341                         if (!qp)
1342                                 continue;
1343
1344                         /* Use no EQ delay if we don't have many outstanding
1345                          * IOs, or if we are only processing 1 CQE/ISR or less.
1346                          * Otherwise, assume we can process up to lpfc_fcp_imax
1347                          * interrupts per HBA.
1348                          */
1349                         if (tot < LPFC_NODELAY_MAX_IO ||
1350                             qp->EQ_cqe_cnt <= max_cqe)
1351                                 val = 0;
1352                         else
1353                                 val = phba->cfg_fcp_imax;
1354
1355                         if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
1356                                 /* Use EQ Delay Register method */
1357
1358                                 /* Convert for EQ Delay register */
1359                                 if (val) {
1360                                         /* First, interrupts per sec per EQ */
1361                                         val = phba->cfg_fcp_imax /
1362                                                 phba->io_channel_irqs;
1363
1364                                         /* us delay between each interrupt */
1365                                         val = LPFC_SEC_TO_USEC / val;
1366                                 }
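                                /*
                                 * Illustrative: 37500 interrupts/sec per EQ
                                 * gives val = LPFC_SEC_TO_USEC / 37500, i.e.
                                 * roughly a 26 us delay written to the EQ
                                 * Delay register (assuming LPFC_SEC_TO_USEC
                                 * is 1000000).
                                 */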
1367                                 if (val != qp->q_mode) {
1368                                         reg_data.word0 = 0;
1369                                         bf_set(lpfc_sliport_eqdelay_id,
1370                                                &reg_data, qp->queue_id);
1371                                         bf_set(lpfc_sliport_eqdelay_delay,
1372                                                &reg_data, val);
1373                                         writel(reg_data.word0, eqdreg);
1374                                 }
1375                         } else {
1376                                 /* Use mbox command method */
1377                                 if (val != qp->q_mode)
1378                                         lpfc_modify_hba_eq_delay(phba, i,
1379                                                                  1, val);
1380                         }
1381
1382                         /*
1383                          * val is either cfg_fcp_imax or 0 (mbox method),
1384                          * or the us delay between interrupts (EQDR method).
1385                          */
1386                         qp->q_mode = val;
1387                         qp->EQ_cqe_cnt = 0;
1388                 }
1389         }
1390
1391 skip_eqdelay:
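        /*
         * Heart-beat decision: if any completion has been seen within the
         * last LPFC_HB_MBOX_INTERVAL, the adapter is clearly alive, so skip
         * issuing the heart-beat mailbox and simply rearm the timer (the
         * longer LPFC_HB_MBOX_TIMEOUT is used while a heart-beat mailbox is
         * still outstanding).
         */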
1392         spin_lock_irq(&phba->pport->work_port_lock);
1393
1394         if (time_after(phba->last_completion_time +
1395                         msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1396                         jiffies)) {
1397                 spin_unlock_irq(&phba->pport->work_port_lock);
1398                 if (!phba->hb_outstanding)
1399                         mod_timer(&phba->hb_tmofunc,
1400                                 jiffies +
1401                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1402                 else
1403                         mod_timer(&phba->hb_tmofunc,
1404                                 jiffies +
1405                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1406                 return;
1407         }
1408         spin_unlock_irq(&phba->pport->work_port_lock);
1409
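        /*
         * If the deferred ELS buffer count is unchanged since the previous
         * interval, nothing has consumed those buffers; splice them off
         * under the lock and free them here.
         */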
1410         if (phba->elsbuf_cnt &&
1411                 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1412                 spin_lock_irq(&phba->hbalock);
1413                 list_splice_init(&phba->elsbuf, &completions);
1414                 phba->elsbuf_cnt = 0;
1415                 phba->elsbuf_prev_cnt = 0;
1416                 spin_unlock_irq(&phba->hbalock);
1417
1418                 while (!list_empty(&completions)) {
1419                         list_remove_head(&completions, buf_ptr,
1420                                 struct lpfc_dmabuf, list);
1421                         lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1422                         kfree(buf_ptr);
1423                 }
1424         }
1425         phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1426
1427         /* If there is no heart beat outstanding, issue a heartbeat command */
1428         if (phba->cfg_enable_hba_heartbeat) {
1429                 if (!phba->hb_outstanding) {
1430                         if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1431                                 (list_empty(&psli->mboxq))) {
1432                                 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1433                                                         GFP_KERNEL);
1434                                 if (!pmboxq) {
1435                                         mod_timer(&phba->hb_tmofunc,
1436                                                  jiffies +
1437                                                  msecs_to_jiffies(1000 *
1438                                                  LPFC_HB_MBOX_INTERVAL));
1439                                         return;
1440                                 }
1441
1442                                 lpfc_heart_beat(phba, pmboxq);
1443                                 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1444                                 pmboxq->vport = phba->pport;
1445                                 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1446                                                 MBX_NOWAIT);
1447
1448                                 if (retval != MBX_BUSY &&
1449                                         retval != MBX_SUCCESS) {
1450                                         mempool_free(pmboxq,
1451                                                         phba->mbox_mem_pool);
1452                                         mod_timer(&phba->hb_tmofunc,
1453                                                 jiffies +
1454                                                 msecs_to_jiffies(1000 *
1455                                                 LPFC_HB_MBOX_INTERVAL));
1456                                         return;
1457                                 }
1458                                 phba->skipped_hb = 0;
1459                                 phba->hb_outstanding = 1;
1460                         } else if (time_before_eq(phba->last_completion_time,
1461                                         phba->skipped_hb)) {
1462                                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1463                                         "2857 Last completion time not "
1464                                         "updated in %d ms\n",
1465                                         jiffies_to_msecs(jiffies
1466                                                  - phba->last_completion_time));
1467                         } else
1468                                 phba->skipped_hb = jiffies;
1469
1470                         mod_timer(&phba->hb_tmofunc,
1471                                  jiffies +
1472                                  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1473                         return;
1474                 } else {
1475                         /*
1476                          * If the heart-beat timeout fired while
1477                          * hb_outstanding is set, give the heart-beat
1478                          * mailbox command a chance to complete or time out.
1479                          */
1480                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1481                                         "0459 Adapter heartbeat still "
1482                                         "outstanding: last compl time was %d ms.\n",
1483                                         jiffies_to_msecs(jiffies
1484                                                  - phba->last_completion_time));
1485                         mod_timer(&phba->hb_tmofunc,
1486                                 jiffies +
1487                                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
1488                 }
1489         } else {
1490                 mod_timer(&phba->hb_tmofunc,
1491                           jiffies +
1492                           msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1493         }
1494 }
1495
1496 /**
1497  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1498  * @phba: pointer to lpfc hba data structure.
1499  *
1500  * This routine is called to bring the HBA offline when an HBA hardware
1501  * error other than Port Error 6 has been detected.
1502  **/
1503 static void
1504 lpfc_offline_eratt(struct lpfc_hba *phba)
1505 {
1506         struct lpfc_sli   *psli = &phba->sli;
1507
1508         spin_lock_irq(&phba->hbalock);
1509         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1510         spin_unlock_irq(&phba->hbalock);
1511         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1512
1513         lpfc_offline(phba);
1514         lpfc_reset_barrier(phba);
1515         spin_lock_irq(&phba->hbalock);
1516         lpfc_sli_brdreset(phba);
1517         spin_unlock_irq(&phba->hbalock);
1518         lpfc_hba_down_post(phba);
1519         lpfc_sli_brdready(phba, HS_MBRDY);
1520         lpfc_unblock_mgmt_io(phba);
1521         phba->link_state = LPFC_HBA_ERROR;
1522         return;
1523 }
1524
1525 /**
1526  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1527  * @phba: pointer to lpfc hba data structure.
1528  *
1529  * This routine is called to bring an SLI4 HBA offline when an HBA hardware
1530  * error other than Port Error 6 has been detected.
1531  **/
1532 void
1533 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1534 {
1535         spin_lock_irq(&phba->hbalock);
1536         phba->link_state = LPFC_HBA_ERROR;
1537         spin_unlock_irq(&phba->hbalock);
1538
1539         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1540         lpfc_offline(phba);
1541         lpfc_hba_down_post(phba);
1542         lpfc_unblock_mgmt_io(phba);
1543 }
1544
1545 /**
1546  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1547  * @phba: pointer to lpfc hba data structure.
1548  *
1549  * This routine is invoked to handle deferred HBA hardware error
1550  * conditions. This type of error is indicated by the HBA setting the ER1
1551  * bit and another ER bit in the host status register. The driver waits
1552  * until the ER1 bit clears before handling the error condition.
1553  **/
1554 static void
1555 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1556 {
1557         uint32_t old_host_status = phba->work_hs;
1558         struct lpfc_sli *psli = &phba->sli;
1559
1560         /* If the pci channel is offline, ignore possible errors,
1561          * since we cannot communicate with the pci card anyway.
1562          */
1563         if (pci_channel_offline(phba->pcidev)) {
1564                 spin_lock_irq(&phba->hbalock);
1565                 phba->hba_flag &= ~DEFER_ERATT;
1566                 spin_unlock_irq(&phba->hbalock);
1567                 return;
1568         }
1569
1570         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1571                 "0479 Deferred Adapter Hardware Error "
1572                 "Data: x%x x%x x%x\n",
1573                 phba->work_hs,
1574                 phba->work_status[0], phba->work_status[1]);
1575
1576         spin_lock_irq(&phba->hbalock);
1577         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1578         spin_unlock_irq(&phba->hbalock);
1579
1581         /*
1582          * Firmware stops when it triggers erratt, which can cause I/Os to
1583          * be dropped by the firmware. Fail the iocbs (I/Os) on the txcmplq
1584          * and let the SCSI layer retry them after the link is re-established.
1585          */
1586         lpfc_sli_abort_fcp_rings(phba);
1587
1588         /*
1589          * There was a firmware error. Take the hba offline and then
1590          * attempt to restart it.
1591          */
1592         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1593         lpfc_offline(phba);
1594
1595         /* Wait for the ER1 bit to clear. */
1596         while (phba->work_hs & HS_FFER1) {
1597                 msleep(100);
1598                 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1599                         phba->work_hs = UNPLUG_ERR;
1600                         break;
1601                 }
1602                 /* If driver is unloading let the worker thread continue */
1603                 if (phba->pport->load_flag & FC_UNLOADING) {
1604                         phba->work_hs = 0;
1605                         break;
1606                 }
1607         }
1608
1609         /*
1610          * This is to protect against a race condition in which the
1611          * first write to the host attention register clears the
1612          * host status register.
1613          */
1614         if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1615                 phba->work_hs = old_host_status & ~HS_FFER1;
1616
1617         spin_lock_irq(&phba->hbalock);
1618         phba->hba_flag &= ~DEFER_ERATT;
1619         spin_unlock_irq(&phba->hbalock);
1620         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1621         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1622 }
1623
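/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to management apps
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event with
 * subcategory LPFC_EVENT_PORTINTERR through the FC transport, notifying
 * any management application listening for board events of the port
 * internal error.
 **/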
1624 static void
1625 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1626 {
1627         struct lpfc_board_event_header board_event;
1628         struct Scsi_Host *shost;
1629
1630         board_event.event_type = FC_REG_BOARD_EVENT;
1631         board_event.subcategory = LPFC_EVENT_PORTINTERR;
1632         shost = lpfc_shost_from_vport(phba->pport);
1633         fc_host_post_vendor_event(shost, fc_get_event_number(),
1634                                   sizeof(board_event),
1635                                   (char *) &board_event,
1636                                   LPFC_NL_VENDOR_ID);
1637 }
1638
1639 /**
1640  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1641  * @phba: pointer to lpfc hba data structure.
1642  *
1643  * This routine is invoked to handle the following HBA hardware error
1644  * conditions:
1645  * 1 - HBA error attention interrupt
1646  * 2 - DMA ring index out of range
1647  * 3 - Mailbox command came back as unknown
1648  **/
1649 static void
1650 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1651 {
1652         struct lpfc_vport *vport = phba->pport;
1653         struct lpfc_sli   *psli = &phba->sli;
1654         uint32_t event_data;
1655         unsigned long temperature;
1656         struct temp_event temp_event_data;
1657         struct Scsi_Host  *shost;
1658
1659         /* If the pci channel is offline, ignore possible errors,
1660          * since we cannot communicate with the pci card anyway.
1661          */
1662         if (pci_channel_offline(phba->pcidev)) {
1663                 spin_lock_irq(&phba->hbalock);
1664                 phba->hba_flag &= ~DEFER_ERATT;
1665                 spin_unlock_irq(&phba->hbalock);
1666                 return;
1667         }
1668
1669         /* If resets are disabled then leave the HBA alone and return */
1670         if (!phba->cfg_enable_hba_reset)
1671                 return;
1672
1673         /* Send an internal error event to mgmt application */
1674         lpfc_board_errevt_to_mgmt(phba);
1675
1676         if (phba->hba_flag & DEFER_ERATT)
1677                 lpfc_handle_deferred_eratt(phba);
1678
1679         if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1680                 if (phba->work_hs & HS_FFER6)
1681                         /* Re-establishing Link */
1682                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1683                                         "1301 Re-establishing Link "
1684                                         "Data: x%x x%x x%x\n",
1685                                         phba->work_hs, phba->work_status[0],
1686                                         phba->work_status[1]);
1687                 if (phba->work_hs & HS_FFER8)
1688                         /* Device Zeroization */
1689                         lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1690                                         "2861 Host Authentication device "
1691                                         "zeroization Data:x%x x%x x%x\n",
1692                                         phba->work_hs, phba->work_status[0],
1693                                         phba->work_status[1]);
1694
1695                 spin_lock_irq(&phba->hbalock);
1696                 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1697                 spin_unlock_irq(&phba->hbalock);
1698
1699                 /*
1700                  * Firmware stops when it triggers erratt with HS_FFER6,
1701                  * which can cause I/Os to be dropped by the firmware.
1702                  * Fail the iocbs (I/Os) on the txcmplq and let the SCSI
1703                  * layer retry them after the link is re-established.
1704                  */
1705                 lpfc_sli_abort_fcp_rings(phba);
1706
1707                 /*
1708                  * There was a firmware error.  Take the hba offline and then
1709                  * attempt to restart it.
1710                  */
1711                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1712                 lpfc_offline(phba);
1713                 lpfc_sli_brdrestart(phba);
1714                 if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1715                         lpfc_unblock_mgmt_io(phba);
1716                         return;
1717                 }
1718                 lpfc_unblock_mgmt_io(phba);
1719         } else if (phba->work_hs & HS_CRIT_TEMP) {
1720                 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1721                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1722                 temp_event_data.event_code = LPFC_CRIT_TEMP;
1723                 temp_event_data.data = (uint32_t)temperature;
1724
1725                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1726                                 "0406 Adapter maximum temperature exceeded "
1727                                 "(%ld), taking this port offline "
1728                                 "Data: x%x x%x x%x\n",
1729                                 temperature, phba->work_hs,
1730                                 phba->work_status[0], phba->work_status[1]);
1731
1732                 shost = lpfc_shost_from_vport(phba->pport);
1733                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1734                                           sizeof(temp_event_data),
1735                                           (char *) &temp_event_data,
1736                                           SCSI_NL_VID_TYPE_PCI
1737                                           | PCI_VENDOR_ID_EMULEX);
1738
1739                 spin_lock_irq(&phba->hbalock);
1740                 phba->over_temp_state = HBA_OVER_TEMP;
1741                 spin_unlock_irq(&phba->hbalock);
1742                 lpfc_offline_eratt(phba);
1743
1744         } else {
1745                 /* The if clause above forces this code path when the status
1746                  * failure is a value other than FFER6, so the port is not
1747                  * taken offline twice. This is the adapter hardware error path.
1748                  */
1749                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1750                                 "0457 Adapter Hardware Error "
1751                                 "Data: x%x x%x x%x\n",
1752                                 phba->work_hs,
1753                                 phba->work_status[0], phba->work_status[1]);
1754
1755                 event_data = FC_REG_DUMP_EVENT;
1756                 shost = lpfc_shost_from_vport(vport);
1757                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1758                                 sizeof(event_data), (char *) &event_data,
1759                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1760
1761                 lpfc_offline_eratt(phba);
1762         }
1763         return;
1764 }
1765
1766 /**
1767  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1768  * @phba: pointer to lpfc hba data structure.
1769  * @mbx_action: flag for mailbox shutdown action.
1770  * @en_rn_msg: enable the "Reset Needed" port recovery log message.
1771  *
1772  * This routine performs an SLI4 port PCI function reset in response to a
1773  * port status register polling attention. It waits for the port status
1774  * register (ERR, RDY, RN) bits before proceeding; interrupt vectors are
1775  * freed and later re-requested to handle possible port resource changes.
1776  **/
1777 static int
1778 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1779                             bool en_rn_msg)
1780 {
1781         int rc;
1782         uint32_t intr_mode;
1783
1784         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1785             LPFC_SLI_INTF_IF_TYPE_2) {
1786                 /*
1787                  * On an error status condition, the driver needs to wait
1788                  * for the port to become ready before performing the reset.
1789                  */
1790                 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1791                 if (rc)
1792                         return rc;
1793         }
1794
1795         /* need reset: attempt for port recovery */
1796         if (en_rn_msg)
1797                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1798                                 "2887 Reset Needed: Attempting Port "
1799                                 "Recovery...\n");
1800         lpfc_offline_prep(phba, mbx_action);
1801         lpfc_offline(phba);
1802         /* release interrupt for possible resource change */
1803         lpfc_sli4_disable_intr(phba);
1804         lpfc_sli_brdrestart(phba);
1805         /* request and enable interrupt */
1806         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1807         if (intr_mode == LPFC_INTR_ERROR) {
1808                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1809                                 "3175 Failed to enable interrupt\n");
1810                 return -EIO;
1811         }
1812         phba->intr_mode = intr_mode;
1813         rc = lpfc_online(phba);
1814         if (rc == 0)
1815                 lpfc_unblock_mgmt_io(phba);
1816
1817         return rc;
1818 }
1819
1820 /**
1821  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1822  * @phba: pointer to lpfc hba data structure.
1823  *
1824  * This routine is invoked to handle the SLI4 HBA hardware error attention
1825  * conditions.
1826  **/
1827 static void
1828 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1829 {
1830         struct lpfc_vport *vport = phba->pport;
1831         uint32_t event_data;
1832         struct Scsi_Host *shost;
1833         uint32_t if_type;
1834         struct lpfc_register portstat_reg = {0};
1835         uint32_t reg_err1, reg_err2;
1836         uint32_t uerrlo_reg, uemasklo_reg;
1837         uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1838         bool en_rn_msg = true;
1839         struct temp_event temp_event_data;
1840         struct lpfc_register portsmphr_reg;
1841         int rc, i;
1842
1843         /* If the pci channel is offline, ignore possible errors, since
1844          * we cannot communicate with the pci card anyway.
1845          */
1846         if (pci_channel_offline(phba->pcidev))
1847                 return;
1848
1849         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
1850         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1851         switch (if_type) {
1852         case LPFC_SLI_INTF_IF_TYPE_0:
1853                 pci_rd_rc1 = lpfc_readl(
1854                                 phba->sli4_hba.u.if_type0.UERRLOregaddr,
1855                                 &uerrlo_reg);
1856                 pci_rd_rc2 = lpfc_readl(
1857                                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
1858                                 &uemasklo_reg);
1859                 /* consider PCI bus read error as pci_channel_offline */
1860                 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
1861                         return;
1862                 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
1863                         lpfc_sli4_offline_eratt(phba);
1864                         return;
1865                 }
1866                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1867                                 "7623 Checking UE recoverable\n");
1868
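                /*
                 * Poll the port semaphore once a second for up to
                 * ue_to_sr / 1000 seconds (ue_to_sr presumably in
                 * milliseconds, given the divide and the 1-second sleep),
                 * waiting for the firmware to report a recoverable UE state.
                 */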
1869                 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
1870                         if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1871                                        &portsmphr_reg.word0))
1872                                 continue;
1873
1874                         smphr_port_status = bf_get(lpfc_port_smphr_port_status,
1875                                                    &portsmphr_reg);
1876                         if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1877                             LPFC_PORT_SEM_UE_RECOVERABLE)
1878                                 break;
1879                         /* Sleep for 1 sec before re-checking the semaphore */
1880                         msleep(1000);
1881                 }
1882
1883                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1884                                 "4827 smphr_port_status x%x: waited %d sec\n",
1885                                 smphr_port_status, i);
1886
1887                 /* Recoverable UE, reset the HBA device */
1888                 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
1889                     LPFC_PORT_SEM_UE_RECOVERABLE) {
1890                         for (i = 0; i < 20; i++) {
1891                                 msleep(1000);
1892                                 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
1893                                     &portsmphr_reg.word0) &&
1894                                     (LPFC_POST_STAGE_PORT_READY ==
1895                                      bf_get(lpfc_port_smphr_port_status,
1896                                      &portsmphr_reg))) {
1897                                         rc = lpfc_sli4_port_sta_fn_reset(phba,
1898                                                 LPFC_MBX_NO_WAIT, en_rn_msg);
1899                                         if (rc == 0)
1900                                                 return;
1901                                         lpfc_printf_log(phba,
1902                                                 KERN_ERR, LOG_INIT,
1903                                                 "4215 Failed to recover UE\n");
1904                                         break;
1905                                 }
1906                         }
1907                 }
1908                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1909                                 "7624 Firmware not ready: Failing UE recovery,"
1910                                 " waited %d sec\n", i);
1911                 lpfc_sli4_offline_eratt(phba);
1912                 break;
1913
1914         case LPFC_SLI_INTF_IF_TYPE_2:
1915         case LPFC_SLI_INTF_IF_TYPE_6:
1916                 pci_rd_rc1 = lpfc_readl(
1917                                 phba->sli4_hba.u.if_type2.STATUSregaddr,
1918                                 &portstat_reg.word0);
1919                 /* consider PCI bus read error as pci_channel_offline */
1920                 if (pci_rd_rc1 == -EIO) {
1921                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1922                                 "3151 PCI bus read access failure: x%x\n",
1923                                 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1924                         return;
1925                 }
1926                 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1927                 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1928                 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1929                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1930                                 "2889 Port Overtemperature event, "
1931                                 "taking port offline Data: x%x x%x\n",
1932                                 reg_err1, reg_err2);
1933
1934                         phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
1935                         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1936                         temp_event_data.event_code = LPFC_CRIT_TEMP;
1937                         temp_event_data.data = 0xFFFFFFFF;
1938
1939                         shost = lpfc_shost_from_vport(phba->pport);
1940                         fc_host_post_vendor_event(shost, fc_get_event_number(),
1941                                                   sizeof(temp_event_data),
1942                                                   (char *)&temp_event_data,
1943                                                   SCSI_NL_VID_TYPE_PCI
1944                                                   | PCI_VENDOR_ID_EMULEX);
1945
1946                         spin_lock_irq(&phba->hbalock);
1947                         phba->over_temp_state = HBA_OVER_TEMP;
1948                         spin_unlock_irq(&phba->hbalock);
1949                         lpfc_sli4_offline_eratt(phba);
1950                         return;
1951                 }
1952                 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1953                     reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1954                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1955                                         "3143 Port Down: Firmware Update "
1956                                         "Detected\n");
1957                         en_rn_msg = false;
1958                 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1959                          reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1960                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1961                                         "3144 Port Down: Debug Dump\n");
1962                 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1963                          reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1964                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1965                                         "3145 Port Down: Provisioning\n");
1966
1967                 /* If resets are disabled then leave the HBA alone and return */
1968                 if (!phba->cfg_enable_hba_reset)
1969                         return;
1970
1971                 /* Check port status register for function reset */
1972                 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
1973                                 en_rn_msg);
1974                 if (rc == 0) {
1975                         /* don't report event on forced debug dump */
1976                         if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
1977                             reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1978                                 return;
1979                         else
1980                                 break;
1981                 }
1982                 /* Not able to recover: fall through and bring the port offline */
1983                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1984                                 "3152 Unrecoverable error, bring the port "
1985                                 "offline\n");
1986                 lpfc_sli4_offline_eratt(phba);
1987                 break;
1988         case LPFC_SLI_INTF_IF_TYPE_1:
1989         default:
1990                 break;
1991         }
1992         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1993                         "3123 Report dump event to upper layer\n");
1994         /* Send an internal error event to mgmt application */
1995         lpfc_board_errevt_to_mgmt(phba);
1996
1997         event_data = FC_REG_DUMP_EVENT;
1998         shost = lpfc_shost_from_vport(vport);
1999         fc_host_post_vendor_event(shost, fc_get_event_number(),
2000                                   sizeof(event_data), (char *) &event_data,
2001                                   SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2002 }
2003
2004 /**
2005  * lpfc_handle_eratt - Wrapper func for handling hba error attention
2006  * @phba: pointer to lpfc HBA data structure.
2007  *
2008  * This routine dispatches to the SLI3 or SLI4 specific error attention
2009  * handling routine through the API jump table function pointer in the
2010  * lpfc_hba struct. The function itself returns nothing.
2014  **/
2015 void
2016 lpfc_handle_eratt(struct lpfc_hba *phba)
2017 {
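        /*
         * phba->lpfc_handle_eratt points at lpfc_handle_eratt_s3 or
         * lpfc_handle_eratt_s4; the pointer is filled in at API jump table
         * setup time (lpfc_init_api_table_setup()) based on the device's
         * SLI revision.
         */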
2018         (*phba->lpfc_handle_eratt)(phba);
2019 }
2020
2021 /**
2022  * lpfc_handle_latt - The HBA link event handler
2023  * @phba: pointer to lpfc hba data structure.
2024  *
2025  * This routine is invoked from the worker thread to handle an HBA host
2026  * attention link event. SLI3 only.
2027  **/
2028 void
2029 lpfc_handle_latt(struct lpfc_hba *phba)
2030 {
2031         struct lpfc_vport *vport = phba->pport;
2032         struct lpfc_sli   *psli = &phba->sli;
2033         LPFC_MBOXQ_t *pmb;
2034         volatile uint32_t control;
2035         struct lpfc_dmabuf *mp;
2036         int rc = 0;
2037
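        /*
         * rc encodes the stage at which setup failed (1: mbox alloc,
         * 2: dmabuf alloc, 3: mbuf alloc, 4: mbox issue) and is reported
         * in the "0300 LATT" message on the error path below.
         */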
2038         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2039         if (!pmb) {
2040                 rc = 1;
2041                 goto lpfc_handle_latt_err_exit;
2042         }
2043
2044         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2045         if (!mp) {
2046                 rc = 2;
2047                 goto lpfc_handle_latt_free_pmb;
2048         }
2049
2050         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2051         if (!mp->virt) {
2052                 rc = 3;
2053                 goto lpfc_handle_latt_free_mp;
2054         }
2055
2056         /* Cleanup any outstanding ELS commands */
2057         lpfc_els_flush_all_cmd(phba);
2058
2059         psli->slistat.link_event++;
2060         lpfc_read_topology(phba, pmb, mp);
2061         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2062         pmb->vport = vport;
2063         /* Block ELS IOCBs until we have processed this mbox command */
2064         phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2065         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2066         if (rc == MBX_NOT_FINISHED) {
2067                 rc = 4;
2068                 goto lpfc_handle_latt_free_mbuf;
2069         }
2070
2071         /* Clear Link Attention in HA REG */
2072         spin_lock_irq(&phba->hbalock);
2073         writel(HA_LATT, phba->HAregaddr);
2074         readl(phba->HAregaddr); /* flush */
2075         spin_unlock_irq(&phba->hbalock);
2076
2077         return;
2078
2079 lpfc_handle_latt_free_mbuf:
2080         phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2081         lpfc_mbuf_free(phba, mp->virt, mp->phys);
2082 lpfc_handle_latt_free_mp:
2083         kfree(mp);
2084 lpfc_handle_latt_free_pmb:
2085         mempool_free(pmb, phba->mbox_mem_pool);
2086 lpfc_handle_latt_err_exit:
2087         /* Enable Link attention interrupts */
2088         spin_lock_irq(&phba->hbalock);
2089         psli->sli_flag |= LPFC_PROCESS_LA;
2090         control = readl(phba->HCregaddr);
2091         control |= HC_LAINT_ENA;
2092         writel(control, phba->HCregaddr);
2093         readl(phba->HCregaddr); /* flush */
2094
2095         /* Clear Link Attention in HA REG */
2096         writel(HA_LATT, phba->HAregaddr);
2097         readl(phba->HAregaddr); /* flush */
2098         spin_unlock_irq(&phba->hbalock);
2099         lpfc_linkdown(phba);
2100         phba->link_state = LPFC_HBA_ERROR;
2101
2102         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2103                      "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2104
2105         return;
2106 }
2107
2108 /**
2109  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2110  * @phba: pointer to lpfc hba data structure.
2111  * @vpd: pointer to the vital product data.
2112  * @len: length of the vital product data in bytes.
2113  *
2114  * This routine parses the Vital Product Data (VPD). The VPD is treated as
2115  * an array of characters. In this routine, the ModelName, ProgramType, and
2116  * ModelDesc, etc. fields of the phba data structure will be populated.
2117  *
2118  * Return codes
2119  *   0 - pointer to the VPD passed in is NULL
2120  *   1 - success
2121  **/
2122 int
2123 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2124 {
2125         uint8_t lenlo, lenhi;
2126         int Length;
2127         int i, j;
2128         int finished = 0;
2129         int index = 0;
2130
2131         if (!vpd)
2132                 return 0;
2133
2134         /* Vital Product */
2135         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2136                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
2137                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2138                         (uint32_t) vpd[3]);
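        /*
         * Standard PCI VPD layout: each resource begins with a tag byte
         * followed by a little-endian 16-bit length. 0x82 is the identifier
         * string, 0x90 the read-only keyword area (keywords such as SN and
         * V1-V4 are parsed below), 0x91 a read-write area, and 0x78 the
         * end tag.
         */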
2139         while (!finished && (index < (len - 4))) {
2140                 switch (vpd[index]) {
2141                 case 0x82:
2142                 case 0x91:
2143                         index += 1;
2144                         lenlo = vpd[index];
2145                         index += 1;
2146                         lenhi = vpd[index];
2147                         index += 1;
2148                         i = ((((unsigned short)lenhi) << 8) + lenlo);
2149                         index += i;
2150                         break;
2151                 case 0x90:
2152                         index += 1;
2153                         lenlo = vpd[index];
2154                         index += 1;
2155                         lenhi = vpd[index];
2156                         index += 1;
2157                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
2158                         if (Length > len - index)
2159                                 Length = len - index;
2160                         while (Length > 0) {
2161                         /* Look for Serial Number */
2162                         if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2163                                 index += 2;
2164                                 i = vpd[index];
2165                                 index += 1;
2166                                 j = 0;
2167                                 Length -= (3+i);
2168                                 while (i--) {
2169                                         phba->SerialNumber[j++] = vpd[index++];
2170                                         if (j == 31)
2171                                                 break;
2172                                 }
2173                                 phba->SerialNumber[j] = 0;
2174                                 continue;
2175                         }
2176                         else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2177                                 phba->vpd_flag |= VPD_MODEL_DESC;
2178                                 index += 2;
2179                                 i = vpd[index];
2180                                 index += 1;
2181                                 j = 0;
2182                                 Length -= (3+i);
2183                                 while (i--) {
2184                                         phba->ModelDesc[j++] = vpd[index++];
2185                                         if (j == 255)
2186                                                 break;
2187                                 }
2188                                 phba->ModelDesc[j] = 0;
2189                                 continue;
2190                         }
2191                         else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2192                                 phba->vpd_flag |= VPD_MODEL_NAME;
2193                                 index += 2;
2194                                 i = vpd[index];
2195                                 index += 1;
2196                                 j = 0;
2197                                 Length -= (3+i);
2198                                 while (i--) {
2199                                         phba->ModelName[j++] = vpd[index++];
2200                                         if (j == 79)
2201                                                 break;
2202                                 }
2203                                 phba->ModelName[j] = 0;
2204                                 continue;
2205                         }
2206                         else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2207                                 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2208                                 index += 2;
2209                                 i = vpd[index];
2210                                 index += 1;
2211                                 j = 0;
2212                                 Length -= (3+i);
2213                                 while (i--) {
2214                                         phba->ProgramType[j++] = vpd[index++];
2215                                         if (j == 255)
2216                                                 break;
2217                                 }
2218                                 phba->ProgramType[j] = 0;
2219                                 continue;
2220                         }
2221                         else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2222                                 phba->vpd_flag |= VPD_PORT;
2223                                 index += 2;
2224                                 i = vpd[index];
2225                                 index += 1;
2226                                 j = 0;
2227                                 Length -= (3+i);
2228                                 while (i--) {
2229                                         if ((phba->sli_rev == LPFC_SLI_REV4) &&
2230                                             (phba->sli4_hba.pport_name_sta ==
2231                                              LPFC_SLI4_PPNAME_GET)) {
2232                                                 j++;
2233                                                 index++;
2234                                         } else
2235                                                 phba->Port[j++] = vpd[index++];
2236                                         if (j == 19)
2237                                                 break;
2238                                 }
2239                                 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2240                                     (phba->sli4_hba.pport_name_sta ==
2241                                      LPFC_SLI4_PPNAME_NON))
2242                                         phba->Port[j] = 0;
2243                                 continue;
2244                         }
2245                         else {
2246                                 index += 2;
2247                                 i = vpd[index];
2248                                 index += 1;
2249                                 index += i;
2250                                 Length -= (3 + i);
2251                         }
2252                 }
2253                 finished = 0;
2254                 break;
2255                 case 0x78:
2256                         finished = 1;
2257                         break;
2258                 default:
2259                         index++;
2260                         break;
2261                 }
2262         }
2263
2264         return 1;
2265 }
2266
2267 /**
2268  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2269  * @phba: pointer to lpfc hba data structure.
2270  * @mdp: pointer to the data structure to hold the derived model name.
2271  * @descp: pointer to the data structure to hold the derived description.
2272  *
2273  * This routine retrieves the HBA's description based on its registered PCI
2274  * device ID. The @descp passed in points to an array of 256 chars. It
2275  * shall be returned with the model name, maximum speed, and the host bus type.
2276  * The @mdp passed into this function points to an array of 80 chars. When the
2277  * function returns, the @mdp will be filled with the model name.
2278  **/
2279 static void
2280 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2281 {
2282         lpfc_vpd_t *vp;
2283         uint16_t dev_id = phba->pcidev->device;
2284         int max_speed;
2285         int GE = 0;
2286         int oneConnect = 0; /* default is not a oneConnect */
2287         struct {
2288                 char *name;
2289                 char *bus;
2290                 char *function;
2291         } m = {"<Unknown>", "", ""};
2292
2293         if (mdp && mdp[0] != '\0'
2294                 && descp && descp[0] != '\0')
2295                 return;
2296
2297         if (phba->lmt & LMT_64Gb)
2298                 max_speed = 64;
2299         else if (phba->lmt & LMT_32Gb)
2300                 max_speed = 32;
2301         else if (phba->lmt & LMT_16Gb)
2302                 max_speed = 16;
2303         else if (phba->lmt & LMT_10Gb)
2304                 max_speed = 10;
2305         else if (phba->lmt & LMT_8Gb)
2306                 max_speed = 8;
2307         else if (phba->lmt & LMT_4Gb)
2308                 max_speed = 4;
2309         else if (phba->lmt & LMT_2Gb)
2310                 max_speed = 2;
2311         else if (phba->lmt & LMT_1Gb)
2312                 max_speed = 1;
2313         else
2314                 max_speed = 0;
2315
2316         vp = &phba->vpd;
2317
2318         switch (dev_id) {
2319         case PCI_DEVICE_ID_FIREFLY:
2320                 m = (typeof(m)){"LP6000", "PCI",
2321                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2322                 break;
2323         case PCI_DEVICE_ID_SUPERFLY:
2324                 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2325                         m = (typeof(m)){"LP7000", "PCI", ""};
2326                 else
2327                         m = (typeof(m)){"LP7000E", "PCI", ""};
2328                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2329                 break;
2330         case PCI_DEVICE_ID_DRAGONFLY:
2331                 m = (typeof(m)){"LP8000", "PCI",
2332                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2333                 break;
2334         case PCI_DEVICE_ID_CENTAUR:
2335                 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2336                         m = (typeof(m)){"LP9002", "PCI", ""};
2337                 else
2338                         m = (typeof(m)){"LP9000", "PCI", ""};
2339                 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2340                 break;
2341         case PCI_DEVICE_ID_RFLY:
2342                 m = (typeof(m)){"LP952", "PCI",
2343                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2344                 break;
2345         case PCI_DEVICE_ID_PEGASUS:
2346                 m = (typeof(m)){"LP9802", "PCI-X",
2347                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2348                 break;
2349         case PCI_DEVICE_ID_THOR:
2350                 m = (typeof(m)){"LP10000", "PCI-X",
2351                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2352                 break;
2353         case PCI_DEVICE_ID_VIPER:
2354                 m = (typeof(m)){"LPX1000",  "PCI-X",
2355                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2356                 break;
2357         case PCI_DEVICE_ID_PFLY:
2358                 m = (typeof(m)){"LP982", "PCI-X",
2359                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2360                 break;
2361         case PCI_DEVICE_ID_TFLY:
2362                 m = (typeof(m)){"LP1050", "PCI-X",
2363                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2364                 break;
2365         case PCI_DEVICE_ID_HELIOS:
2366                 m = (typeof(m)){"LP11000", "PCI-X2",
2367                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2368                 break;
2369         case PCI_DEVICE_ID_HELIOS_SCSP:
2370                 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2371                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2372                 break;
2373         case PCI_DEVICE_ID_HELIOS_DCSP:
2374                 m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2375                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2376                 break;
2377         case PCI_DEVICE_ID_NEPTUNE:
2378                 m = (typeof(m)){"LPe1000", "PCIe",
2379                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2380                 break;
2381         case PCI_DEVICE_ID_NEPTUNE_SCSP:
2382                 m = (typeof(m)){"LPe1000-SP", "PCIe",
2383                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2384                 break;
2385         case PCI_DEVICE_ID_NEPTUNE_DCSP:
2386                 m = (typeof(m)){"LPe1002-SP", "PCIe",
2387                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2388                 break;
2389         case PCI_DEVICE_ID_BMID:
2390                 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2391                 break;
2392         case PCI_DEVICE_ID_BSMB:
2393                 m = (typeof(m)){"LP111", "PCI-X2",
2394                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2395                 break;
2396         case PCI_DEVICE_ID_ZEPHYR:
2397                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2398                 break;
2399         case PCI_DEVICE_ID_ZEPHYR_SCSP:
2400                 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2401                 break;
2402         case PCI_DEVICE_ID_ZEPHYR_DCSP:
2403                 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2404                 GE = 1;
2405                 break;
2406         case PCI_DEVICE_ID_ZMID:
2407                 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2408                 break;
2409         case PCI_DEVICE_ID_ZSMB:
2410                 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2411                 break;
2412         case PCI_DEVICE_ID_LP101:
2413                 m = (typeof(m)){"LP101", "PCI-X",
2414                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2415                 break;
2416         case PCI_DEVICE_ID_LP10000S:
2417                 m = (typeof(m)){"LP10000-S", "PCI",
2418                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2419                 break;
2420         case PCI_DEVICE_ID_LP11000S:
2421                 m = (typeof(m)){"LP11000-S", "PCI-X2",
2422                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2423                 break;
2424         case PCI_DEVICE_ID_LPE11000S:
2425                 m = (typeof(m)){"LPe11000-S", "PCIe",
2426                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2427                 break;
2428         case PCI_DEVICE_ID_SAT:
2429                 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2430                 break;
2431         case PCI_DEVICE_ID_SAT_MID:
2432                 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2433                 break;
2434         case PCI_DEVICE_ID_SAT_SMB:
2435                 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2436                 break;
2437         case PCI_DEVICE_ID_SAT_DCSP:
2438                 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2439                 break;
2440         case PCI_DEVICE_ID_SAT_SCSP:
2441                 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2442                 break;
2443         case PCI_DEVICE_ID_SAT_S:
2444                 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2445                 break;
2446         case PCI_DEVICE_ID_HORNET:
2447                 m = (typeof(m)){"LP21000", "PCIe",
2448                                 "Obsolete, Unsupported FCoE Adapter"};
2449                 GE = 1;
2450                 break;
2451         case PCI_DEVICE_ID_PROTEUS_VF:
2452                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2453                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2454                 break;
2455         case PCI_DEVICE_ID_PROTEUS_PF:
2456                 m = (typeof(m)){"LPev12000", "PCIe IOV",
2457                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2458                 break;
2459         case PCI_DEVICE_ID_PROTEUS_S:
2460                 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2461                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2462                 break;
2463         case PCI_DEVICE_ID_TIGERSHARK:
2464                 oneConnect = 1;
2465                 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2466                 break;
2467         case PCI_DEVICE_ID_TOMCAT:
2468                 oneConnect = 1;
2469                 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2470                 break;
2471         case PCI_DEVICE_ID_FALCON:
2472                 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2473                                 "EmulexSecure Fibre"};
2474                 break;
2475         case PCI_DEVICE_ID_BALIUS:
2476                 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2477                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2478                 break;
2479         case PCI_DEVICE_ID_LANCER_FC:
2480                 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2481                 break;
2482         case PCI_DEVICE_ID_LANCER_FC_VF:
2483                 m = (typeof(m)){"LPe16000", "PCIe",
2484                                 "Obsolete, Unsupported Fibre Channel Adapter"};
2485                 break;
2486         case PCI_DEVICE_ID_LANCER_FCOE:
2487                 oneConnect = 1;
2488                 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2489                 break;
2490         case PCI_DEVICE_ID_LANCER_FCOE_VF:
2491                 oneConnect = 1;
2492                 m = (typeof(m)){"OCe15100", "PCIe",
2493                                 "Obsolete, Unsupported FCoE"};
2494                 break;
2495         case PCI_DEVICE_ID_LANCER_G6_FC:
2496                 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2497                 break;
2498         case PCI_DEVICE_ID_LANCER_G7_FC:
2499                 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2500                 break;
2501         case PCI_DEVICE_ID_SKYHAWK:
2502         case PCI_DEVICE_ID_SKYHAWK_VF:
2503                 oneConnect = 1;
2504                 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2505                 break;
2506         default:
2507                 m = (typeof(m)){"Unknown", "", ""};
2508                 break;
2509         }
2510
2511         if (mdp && mdp[0] == '\0')
2512                 snprintf(mdp, 79, "%s", m.name);
2513         /*
2514          * oneConnect HBAs require special processing; they are all initiators
2515          * and we put the port number on the end.
2516          */
2517         if (descp && descp[0] == '\0') {
2518                 if (oneConnect)
2519                         snprintf(descp, 255,
2520                                 "Emulex OneConnect %s, %s Initiator %s",
2521                                 m.name, m.function,
2522                                 phba->Port);
2523                 else if (max_speed == 0)
2524                         snprintf(descp, 255,
2525                                 "Emulex %s %s %s",
2526                                 m.name, m.bus, m.function);
2527                 else
2528                         snprintf(descp, 255,
2529                                 "Emulex %s %d%s %s %s",
2530                                 m.name, max_speed, (GE) ? "GE" : "Gb",
2531                                 m.bus, m.function);
2532         }
2533 }
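
/*
 * Editor's note -- illustrative caller sketch (not part of the driver).
 * The 79/255 length limits in the snprintf() calls above imply an 80-byte
 * model buffer and a 256-byte description buffer; both must start out
 * NUL-terminated, since only buffers whose first byte is '\0' are filled:
 *
 *      uint8_t model[80] = "";
 *      uint8_t desc[256] = "";
 *
 *      lpfc_get_hba_model_desc(phba, model, desc);
 */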
2534
2535 /**
2536  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2537  * @phba: pointer to lpfc hba data structure.
2538  * @pring: pointer to an IOCB ring.
2539  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2540  *
2541  * This routine posts a given number of IOCBs with the associated DMA buffer
2542  * descriptors specified by the cnt argument to the given IOCB ring.
2543  *
2544  * Return codes
2545  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2546  **/
2547 int
2548 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2549 {
2550         IOCB_t *icmd;
2551         struct lpfc_iocbq *iocb;
2552         struct lpfc_dmabuf *mp1, *mp2;
2553
2554         cnt += pring->missbufcnt;
2555
2556         /* While there are buffers to post */
2557         while (cnt > 0) {
2558                 /* Allocate buffer for command iocb */
2559                 iocb = lpfc_sli_get_iocbq(phba);
2560                 if (iocb == NULL) {
2561                         pring->missbufcnt = cnt;
2562                         return cnt;
2563                 }
2564                 icmd = &iocb->iocb;
2565
2566                 /* 2 buffers can be posted per command */
2567                 /* Allocate buffer to post */
2568                 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2569                 if (mp1)
2570                         mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2571                 if (!mp1 || !mp1->virt) {
2572                         kfree(mp1);
2573                         lpfc_sli_release_iocbq(phba, iocb);
2574                         pring->missbufcnt = cnt;
2575                         return cnt;
2576                 }
2577
2578                 INIT_LIST_HEAD(&mp1->list);
2579                 /* Allocate buffer to post */
2580                 if (cnt > 1) {
2581                         mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2582                         if (mp2)
2583                                 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2584                                                             &mp2->phys);
2585                         if (!mp2 || !mp2->virt) {
2586                                 kfree(mp2);
2587                                 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2588                                 kfree(mp1);
2589                                 lpfc_sli_release_iocbq(phba, iocb);
2590                                 pring->missbufcnt = cnt;
2591                                 return cnt;
2592                         }
2593
2594                         INIT_LIST_HEAD(&mp2->list);
2595                 } else {
2596                         mp2 = NULL;
2597                 }
2598
2599                 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2600                 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2601                 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2602                 icmd->ulpBdeCount = 1;
2603                 cnt--;
2604                 if (mp2) {
2605                         icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2606                         icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2607                         icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2608                         cnt--;
2609                         icmd->ulpBdeCount = 2;
2610                 }
2611
2612                 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2613                 icmd->ulpLe = 1;
2614
2615                 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2616                     IOCB_ERROR) {
2617                         lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2618                         kfree(mp1);
2619                         cnt++;
2620                         if (mp2) {
2621                                 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2622                                 kfree(mp2);
2623                                 cnt++;
2624                         }
2625                         lpfc_sli_release_iocbq(phba, iocb);
2626                         pring->missbufcnt = cnt;
2627                         return cnt;
2628                 }
2629                 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2630                 if (mp2)
2631                         lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2632         }
2633         pring->missbufcnt = 0;
2634         return 0;
2635 }
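
/*
 * Editor's note -- illustrative caller sketch (not part of the driver).
 * lpfc_post_buffer() returns the number of IOCBs it could NOT post (also
 * remembered in pring->missbufcnt), so a return of 0 means the ring was
 * fully replenished and a nonzero return can simply be retried later:
 *
 *      int shortfall = lpfc_post_buffer(phba, pring, 64);
 *
 *      if (shortfall)
 *              ;       // out of iocbqs or mbufs; missbufcnt holds the rest
 */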
2636
2637 /**
2638  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2639  * @phba: pointer to lpfc hba data structure.
2640  *
2641  * This routine posts initial receive IOCB buffers to the ELS ring. The
2642  * number of initial IOCB buffers, specified by LPFC_BUF_RING0, is
2643  * currently set to 64 IOCBs. SLI3 only.
2644  *
2645  * Return codes
2646  *   0 - success (currently always success)
2647  **/
2648 static int
2649 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2650 {
2651         struct lpfc_sli *psli = &phba->sli;
2652
2653         /* Ring 0, ELS / CT buffers */
2654         lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2655         /* Ring 2 - FCP no buffers needed */
2656
2657         return 0;
2658 }
2659
2660 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
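
/*
 * Editor's note: S(N, V) is a 32-bit rotate-left of V by N bits, the
 * rotation primitive of the SHA-1 style rounds below. For example,
 * S(1, 0x80000000) == 0x00000001 and S(5, 0x00000001) == 0x00000020.
 */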
2661
2662 /**
2663  * lpfc_sha_init - Set up initial array of hash table entries
2664  * @HashResultPointer: pointer to an array as hash table.
2665  *
2666  * This routine sets up the initial values in the array of hash table
2667  * entries for the LC HBAs.
2668  **/
2669 static void
2670 lpfc_sha_init(uint32_t * HashResultPointer)
2671 {
2672         HashResultPointer[0] = 0x67452301;
2673         HashResultPointer[1] = 0xEFCDAB89;
2674         HashResultPointer[2] = 0x98BADCFE;
2675         HashResultPointer[3] = 0x10325476;
2676         HashResultPointer[4] = 0xC3D2E1F0;
2677 }
2678
2679 /**
2680  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2681  * @HashResultPointer: pointer to an initial/result hash table.
2682  * @HashWorkingPointer: pointer to a working hash table.
2683  *
2684  * This routine iterates an initial hash table pointed to by @HashResultPointer
2685  * with the values from the working hash table pointed to by @HashWorkingPointer.
2686  * The results are put back into the initial hash table and returned through
2687  * @HashResultPointer as the result hash table.
2688  **/
2689 static void
2690 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2691 {
2692         int t;
2693         uint32_t TEMP;
2694         uint32_t A, B, C, D, E;
2695         t = 16;
2696         do {
2697                 HashWorkingPointer[t] =
2698                         S(1, HashWorkingPointer[t - 3] ^
2699                              HashWorkingPointer[t - 8] ^
2700                              HashWorkingPointer[t - 14] ^
2701                              HashWorkingPointer[t - 16]);
2702         } while (++t <= 79);
2703         t = 0;
2704         A = HashResultPointer[0];
2705         B = HashResultPointer[1];
2706         C = HashResultPointer[2];
2707         D = HashResultPointer[3];
2708         E = HashResultPointer[4];
2709
2710         do {
2711                 if (t < 20) {
2712                         TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2713                 } else if (t < 40) {
2714                         TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2715                 } else if (t < 60) {
2716                         TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2717                 } else {
2718                         TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2719                 }
2720                 TEMP += S(5, A) + E + HashWorkingPointer[t];
2721                 E = D;
2722                 D = C;
2723                 C = S(30, B);
2724                 B = A;
2725                 A = TEMP;
2726         } while (++t <= 79);
2727
2728         HashResultPointer[0] += A;
2729         HashResultPointer[1] += B;
2730         HashResultPointer[2] += C;
2731         HashResultPointer[3] += D;
2732         HashResultPointer[4] += E;
2733
2734 }
2735
2736 /**
2737  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2738  * @RandomChallenge: pointer to the entry of host challenge random number array.
2739  * @HashWorking: pointer to the entry of the working hash array.
2740  *
2741  * This routine calculates the working hash array referred to by @HashWorking
2742  * from the challenge random numbers associated with the host, referred to by
2743  * @RandomChallenge. The result is put into the entry of the working hash
2744  * array and returned by reference through @HashWorking.
2745  **/
2746 static void
2747 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2748 {
2749         *HashWorking = (*RandomChallenge ^ *HashWorking);
2750 }
2751
2752 /**
2753  * lpfc_hba_init - Perform special handling for LC HBA initialization
2754  * @phba: pointer to lpfc hba data structure.
2755  * @hbainit: pointer to an array of unsigned 32-bit integers.
2756  *
2757  * This routine performs the special handling for LC HBA initialization.
2758  **/
2759 void
2760 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2761 {
2762         int t;
2763         uint32_t *HashWorking;
2764         uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2765
2766         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2767         if (!HashWorking)
2768                 return;
2769
2770         HashWorking[0] = HashWorking[78] = *pwwnn++;
2771         HashWorking[1] = HashWorking[79] = *pwwnn;
2772
2773         for (t = 0; t < 7; t++)
2774                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2775
2776         lpfc_sha_init(hbainit);
2777         lpfc_sha_iterate(hbainit, HashWorking);
2778         kfree(HashWorking);
2779 }
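
/*
 * Editor's note -- illustrative sketch (not part of the driver) of how the
 * digest above is consumed. A CONFIG_PORT-time caller passes a five-word
 * array that receives the SHA-1 style digest of the WWNN-seeded working
 * array XORed with the adapter's random challenge data:
 *
 *      uint32_t hbainit[5] = { 0 };
 *
 *      lpfc_hba_init(phba, hbainit);   // hbainit[0..4] now holds the digest
 */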
2780
2781 /**
2782  * lpfc_cleanup - Performs vport cleanups before deleting a vport
2783  * @vport: pointer to a virtual N_Port data structure.
2784  *
2785  * This routine performs the necessary cleanups before deleting the @vport.
2786  * It invokes the discovery state machine to perform necessary state
2787  * transitions and to release the ndlps associated with the @vport. Note,
2788  * the physical port is treated as @vport 0.
2789  **/
2790 void
2791 lpfc_cleanup(struct lpfc_vport *vport)
2792 {
2793         struct lpfc_hba   *phba = vport->phba;
2794         struct lpfc_nodelist *ndlp, *next_ndlp;
2795         int i = 0;
2796
2797         if (phba->link_state > LPFC_LINK_DOWN)
2798                 lpfc_port_link_failure(vport);
2799
2800         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2801                 if (!NLP_CHK_NODE_ACT(ndlp)) {
2802                         ndlp = lpfc_enable_node(vport, ndlp,
2803                                                 NLP_STE_UNUSED_NODE);
2804                         if (!ndlp)
2805                                 continue;
2806                         spin_lock_irq(&phba->ndlp_lock);
2807                         NLP_SET_FREE_REQ(ndlp);
2808                         spin_unlock_irq(&phba->ndlp_lock);
2809                         /* Trigger the release of the ndlp memory */
2810                         lpfc_nlp_put(ndlp);
2811                         continue;
2812                 }
2813                 spin_lock_irq(&phba->ndlp_lock);
2814                 if (NLP_CHK_FREE_REQ(ndlp)) {
2815                         /* The ndlp is already in memory free mode; skip it */
2816                         spin_unlock_irq(&phba->ndlp_lock);
2817                         continue;
2818                 } else
2819                         /* Indicate request for freeing ndlp memory */
2820                         NLP_SET_FREE_REQ(ndlp);
2821                 spin_unlock_irq(&phba->ndlp_lock);
2822
2823                 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2824                     ndlp->nlp_DID == Fabric_DID) {
2825                         /* Just free up ndlp with Fabric_DID for vports */
2826                         lpfc_nlp_put(ndlp);
2827                         continue;
2828                 }
2829
2830                 /* take care of nodes in unused state before the state
2831                  * machine takes action.
2832                  */
2833                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2834                         lpfc_nlp_put(ndlp);
2835                         continue;
2836                 }
2837
2838                 if (ndlp->nlp_type & NLP_FABRIC)
2839                         lpfc_disc_state_machine(vport, ndlp, NULL,
2840                                         NLP_EVT_DEVICE_RECOVERY);
2841
2842                 lpfc_disc_state_machine(vport, ndlp, NULL,
2843                                              NLP_EVT_DEVICE_RM);
2844         }
2845
2846         /* At this point, ALL ndlps should be gone
2847          * because of the previous NLP_EVT_DEVICE_RM.
2848          * Let's wait for this to happen, if needed.
2849          */
2850         while (!list_empty(&vport->fc_nodes)) {
2851                 if (i++ > 3000) {
2852                         lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2853                                 "0233 Nodelist not empty\n");
2854                         list_for_each_entry_safe(ndlp, next_ndlp,
2855                                                 &vport->fc_nodes, nlp_listp) {
2856                                 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2857                                                 LOG_NODE,
2858                                                 "0282 did:x%x ndlp:x%p "
2859                                                 "usgmap:x%x refcnt:%d\n",
2860                                                 ndlp->nlp_DID, (void *)ndlp,
2861                                                 ndlp->nlp_usg_map,
2862                                                 kref_read(&ndlp->kref));
2863                         }
2864                         break;
2865                 }
2866
2867                 /* Wait for any activity on ndlps to settle */
2868                 msleep(10);
2869         }
2870         lpfc_cleanup_vports_rrqs(vport, NULL);
2871 }
2872
2873 /**
2874  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2875  * @vport: pointer to a virtual N_Port data structure.
2876  *
2877  * This routine stops all the timers associated with a @vport. This function
2878  * is invoked before disabling or deleting a @vport. Note that the physical
2879  * port is treated as @vport 0.
2880  **/
2881 void
2882 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2883 {
2884         del_timer_sync(&vport->els_tmofunc);
2885         del_timer_sync(&vport->delayed_disc_tmo);
2886         lpfc_can_disctmo(vport);
2887         return;
2888 }
2889
2890 /**
2891  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2892  * @phba: pointer to lpfc hba data structure.
2893  *
2894  * This routine stops the SLI4 FCF rediscovery wait timer if it is on. The
2895  * caller of this routine should already hold the host lock.
2896  **/
2897 void
2898 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2899 {
2900         /* Clear pending FCF rediscovery wait flag */
2901         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2902
2903         /* Now, try to stop the timer */
2904         del_timer(&phba->fcf.redisc_wait);
2905 }
2906
2907 /**
2908  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2909  * @phba: pointer to lpfc hba data structure.
2910  *
2911  * This routine stops the SLI4 FCF rediscovery wait timer if it is on. It
2912  * checks, with the host lock held, whether the FCF rediscovery wait timer
2913  * is pending before proceeding to disable the timer and clear the wait
2914  * timer pending flag.
2915  **/
2916 void
2917 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2918 {
2919         spin_lock_irq(&phba->hbalock);
2920         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2921                 /* FCF rediscovery timer already fired or stopped */
2922                 spin_unlock_irq(&phba->hbalock);
2923                 return;
2924         }
2925         __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2926         /* Clear failover in progress flags */
2927         phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2928         spin_unlock_irq(&phba->hbalock);
2929 }
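
/*
 * Editor's note -- illustrative sketch (not part of the driver) of the
 * locked/unlocked helper convention above: the double-underscore variant
 * assumes the caller already holds phba->hbalock, while the plain variant
 * takes the lock itself. A context that already owns the lock would do:
 *
 *      spin_lock_irq(&phba->hbalock);
 *      // ... other FCF bookkeeping done under hbalock ...
 *      __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 *      spin_unlock_irq(&phba->hbalock);
 */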
2930
2931 /**
2932  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2933  * @phba: pointer to lpfc hba data structure.
2934  *
2935  * This routine stops all the timers associated with an HBA. This function is
2936  * invoked before either putting an HBA offline or unloading the driver.
2937  **/
2938 void
2939 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2940 {
2941         lpfc_stop_vport_timers(phba->pport);
2942         del_timer_sync(&phba->sli.mbox_tmo);
2943         del_timer_sync(&phba->fabric_block_timer);
2944         del_timer_sync(&phba->eratt_poll);
2945         del_timer_sync(&phba->hb_tmofunc);
2946         if (phba->sli_rev == LPFC_SLI_REV4) {
2947                 del_timer_sync(&phba->rrq_tmr);
2948                 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2949         }
2950         phba->hb_outstanding = 0;
2951
2952         switch (phba->pci_dev_grp) {
2953         case LPFC_PCI_DEV_LP:
2954                 /* Stop any LightPulse device specific driver timers */
2955                 del_timer_sync(&phba->fcp_poll_timer);
2956                 break;
2957         case LPFC_PCI_DEV_OC:
2958                 /* Stop any OneConnect device specific driver timers */
2959                 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2960                 break;
2961         default:
2962                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2963                                 "0297 Invalid device group (x%x)\n",
2964                                 phba->pci_dev_grp);
2965                 break;
2966         }
2967         return;
2968 }
2969
2970 /**
2971  * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2972  * @phba: pointer to lpfc hba data structure.
2973  * @mbx_action: whether to wait for the active mailbox command to complete.
2974  *
2975  * This routine marks an HBA's management interface as blocked. Once blocked,
2976  * all user space access to the HBA, whether from the sysfs or the libdfc
2977  * interface, is denied. The HBA is set to block the management interface
2978  * when the driver prepares the HBA interface for online or offline.
2979  **/
2980 static void
2981 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2982 {
2983         unsigned long iflag;
2984         uint8_t actcmd = MBX_HEARTBEAT;
2985         unsigned long timeout;
2986
2987         spin_lock_irqsave(&phba->hbalock, iflag);
2988         phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2989         spin_unlock_irqrestore(&phba->hbalock, iflag);
2990         if (mbx_action == LPFC_MBX_NO_WAIT)
2991                 return;
2992         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2993         spin_lock_irqsave(&phba->hbalock, iflag);
2994         if (phba->sli.mbox_active) {
2995                 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2996                 /* Determine how long we might wait for the active mailbox
2997                  * command to be gracefully completed by firmware.
2998                  */
2999                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3000                                 phba->sli.mbox_active) * 1000) + jiffies;
3001         }
3002         spin_unlock_irqrestore(&phba->hbalock, iflag);
3003
3004         /* Wait for the outstanding mailbox command to complete */
3005         while (phba->sli.mbox_active) {
3006                 /* Check active mailbox complete status every 2ms */
3007                 msleep(2);
3008                 if (time_after(jiffies, timeout)) {
3009                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3010                                 "2813 Mgmt IO is Blocked %x "
3011                                 "- mbox cmd %x still active\n",
3012                                 phba->sli.sli_flag, actcmd);
3013                         break;
3014                 }
3015         }
3016 }
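
/*
 * Editor's note -- illustrative sketch (not part of the driver) of the
 * jiffies-based poll-with-timeout idiom used above; time_after() compares
 * correctly across jiffies wraparound, which a plain ">" would not:
 *
 *      unsigned long timeout = jiffies + msecs_to_jiffies(5000);
 *
 *      while (still_pending()) {               // hypothetical predicate
 *              msleep(2);                      // poll every 2ms
 *              if (time_after(jiffies, timeout))
 *                      break;                  // give up after 5 seconds
 *      }
 */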
3017
3018 /**
3019  * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3020  * @phba: pointer to lpfc hba data structure.
3021  *
3022  * Allocate RPIs for all active remote nodes. This is needed whenever
3023  * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3024  * is to fix up the temporary rpi assignments.
3025  **/
3026 void
3027 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3028 {
3029         struct lpfc_nodelist  *ndlp, *next_ndlp;
3030         struct lpfc_vport **vports;
3031         int i, rpi;
3032         unsigned long flags;
3033
3034         if (phba->sli_rev != LPFC_SLI_REV4)
3035                 return;
3036
3037         vports = lpfc_create_vport_work_array(phba);
3038         if (vports == NULL)
3039                 return;
3040
3041         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3042                 if (vports[i]->load_flag & FC_UNLOADING)
3043                         continue;
3044
3045                 list_for_each_entry_safe(ndlp, next_ndlp,
3046                                          &vports[i]->fc_nodes,
3047                                          nlp_listp) {
3048                         if (!NLP_CHK_NODE_ACT(ndlp))
3049                                 continue;
3050                         rpi = lpfc_sli4_alloc_rpi(phba);
3051                         if (rpi == LPFC_RPI_ALLOC_ERROR) {
3052                                 spin_lock_irqsave(&phba->ndlp_lock, flags);
3053                                 NLP_CLR_NODE_ACT(ndlp);
3054                                 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3055                                 continue;
3056                         }
3057                         ndlp->nlp_rpi = rpi;
3058                         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3059                                          "0009 rpi:%x DID:%x "
3060                                          "flg:%x map:%x %p\n", ndlp->nlp_rpi,
3061                                          ndlp->nlp_DID, ndlp->nlp_flag,
3062                                          ndlp->nlp_usg_map, ndlp);
3063                 }
3064         }
3065         lpfc_destroy_vport_work_array(phba, vports);
3066 }
3067
3068 /**
3069  * lpfc_online - Initialize and bring a HBA online
3070  * @phba: pointer to lpfc hba data structure.
3071  *
3072  * This routine initializes the HBA and brings it online. During this
3073  * process, the management interface is blocked to prevent user space access
3074  * to the HBA from interfering with the driver initialization.
3075  *
3076  * Return codes
3077  *   0 - successful
3078  *   1 - failed
3079  **/
3080 int
3081 lpfc_online(struct lpfc_hba *phba)
3082 {
3083         struct lpfc_vport *vport;
3084         struct lpfc_vport **vports;
3085         int i, error = 0;
3086         bool vpis_cleared = false;
3087
3088         if (!phba)
3089                 return 0;
3090         vport = phba->pport;
3091
3092         if (!(vport->fc_flag & FC_OFFLINE_MODE))
3093                 return 0;
3094
3095         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3096                         "0458 Bring Adapter online\n");
3097
3098         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3099
3100         if (phba->sli_rev == LPFC_SLI_REV4) {
3101                 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3102                         lpfc_unblock_mgmt_io(phba);
3103                         return 1;
3104                 }
3105                 spin_lock_irq(&phba->hbalock);
3106                 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3107                         vpis_cleared = true;
3108                 spin_unlock_irq(&phba->hbalock);
3109
3110                 /* Reestablish the local initiator port.
3111                  * The offline process destroyed the previous lport.
3112                  */
3113                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3114                                 !phba->nvmet_support) {
3115                         error = lpfc_nvme_create_localport(phba->pport);
3116                         if (error)
3117                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3118                                         "6132 NVME restore reg failed "
3119                                         "on nvmei error x%x\n", error);
3120                 }
3121         } else {
3122                 lpfc_sli_queue_init(phba);
3123                 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3124                         lpfc_unblock_mgmt_io(phba);
3125                         return 1;
3126                 }
3127         }
3128
3129         vports = lpfc_create_vport_work_array(phba);
3130         if (vports != NULL) {
3131                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3132                         struct Scsi_Host *shost;
3133                         shost = lpfc_shost_from_vport(vports[i]);
3134                         spin_lock_irq(shost->host_lock);
3135                         vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3136                         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3137                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3138                         if (phba->sli_rev == LPFC_SLI_REV4) {
3139                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3140                                 if ((vpis_cleared) &&
3141                                     (vports[i]->port_type !=
3142                                         LPFC_PHYSICAL_PORT))
3143                                         vports[i]->vpi = 0;
3144                         }
3145                         spin_unlock_irq(shost->host_lock);
3146                 }
3147         }
3148         lpfc_destroy_vport_work_array(phba, vports);
3149
3150         lpfc_unblock_mgmt_io(phba);
3151         return 0;
3152 }
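
/*
 * Editor's note -- illustrative caller sketch (not part of the driver).
 * lpfc_online() returns 0 on success (including the already-online case)
 * and 1 on an SLI bring-up failure, so callers use a simple truth test:
 *
 *      if (lpfc_online(phba))
 *              ;       // adapter could not be brought online
 */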
3153
3154 /**
3155  * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
3156  * @phba: pointer to lpfc hba data structure.
3157  *
3158  * This routine marks an HBA's management interface as not blocked. Once the
3159  * HBA's management interface is marked as not blocked, all user space
3160  * access to the HBA, whether from the sysfs or the libdfc interface, is
3161  * allowed. The HBA is set to block the management interface when the
3162  * driver prepares the HBA interface for online or offline, and then set to
3163  * unblock the management interface afterwards.
3164  **/
3165 void
3166 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3167 {
3168         unsigned long iflag;
3169
3170         spin_lock_irqsave(&phba->hbalock, iflag);
3171         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3172         spin_unlock_irqrestore(&phba->hbalock, iflag);
3173 }
3174
3175 /**
3176  * lpfc_offline_prep - Prepare an HBA to be brought offline
3177  * @phba: pointer to lpfc hba data structure.
3178  * @mbx_action: flag for the mailbox shutdown action.
3179  *
3180  * This routine prepares an HBA to be brought offline. It unregisters the
3181  * login for all nodes on all vports and flushes the mailbox queue.
3182  **/
3183 void
3184 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3185 {
3186         struct lpfc_vport *vport = phba->pport;
3187         struct lpfc_nodelist  *ndlp, *next_ndlp;
3188         struct lpfc_vport **vports;
3189         struct Scsi_Host *shost;
3190         int i;
3191
3192         if (vport->fc_flag & FC_OFFLINE_MODE)
3193                 return;
3194
3195         lpfc_block_mgmt_io(phba, mbx_action);
3196
3197         lpfc_linkdown(phba);
3198
3199         /* Issue an unreg_login to all nodes on all vports */
3200         vports = lpfc_create_vport_work_array(phba);
3201         if (vports != NULL) {
3202                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3203                         if (vports[i]->load_flag & FC_UNLOADING)
3204                                 continue;
3205                         shost = lpfc_shost_from_vport(vports[i]);
3206                         spin_lock_irq(shost->host_lock);
3207                         vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3208                         vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3209                         vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3210                         spin_unlock_irq(shost->host_lock);
3211
3212                         shost = lpfc_shost_from_vport(vports[i]);
3213                         list_for_each_entry_safe(ndlp, next_ndlp,
3214                                                  &vports[i]->fc_nodes,
3215                                                  nlp_listp) {
3216                                 if (!NLP_CHK_NODE_ACT(ndlp))
3217                                         continue;
3218                                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3219                                         continue;
3220                                 if (ndlp->nlp_type & NLP_FABRIC) {
3221                                         lpfc_disc_state_machine(vports[i], ndlp,
3222                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
3223                                         lpfc_disc_state_machine(vports[i], ndlp,
3224                                                 NULL, NLP_EVT_DEVICE_RM);
3225                                 }
3226                                 spin_lock_irq(shost->host_lock);
3227                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3228                                 spin_unlock_irq(shost->host_lock);
3229                                 /*
3230                                  * Whenever an SLI4 port goes offline, free the
3231                                  * RPI. Get a new RPI when the adapter port
3232                                  * comes back online.
3233                                  */
3234                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3235                                         lpfc_printf_vlog(ndlp->vport,
3236                                                          KERN_INFO, LOG_NODE,
3237                                                          "0011 lpfc_offline: "
3238                                                          "ndlp:x%p did %x "
3239                                                          "usgmap:x%x rpi:%x\n",
3240                                                          ndlp, ndlp->nlp_DID,
3241                                                          ndlp->nlp_usg_map,
3242                                                          ndlp->nlp_rpi);
3243
3244                                         lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3245                                 }
3246                                 lpfc_unreg_rpi(vports[i], ndlp);
3247                         }
3248                 }
3249         }
3250         lpfc_destroy_vport_work_array(phba, vports);
3251
3252         lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3253
3254         if (phba->wq)
3255                 flush_workqueue(phba->wq);
3256 }
3257
3258 /**
3259  * lpfc_offline - Bring a HBA offline
3260  * @phba: pointer to lpfc hba data structure.
3261  *
3262  * This routine actually brings a HBA offline. It stops all the timers
3263  * associated with the HBA, brings down the SLI layer, and eventually
3264  * marks the HBA as in offline state for the upper layer protocol.
3265  **/
3266 void
3267 lpfc_offline(struct lpfc_hba *phba)
3268 {
3269         struct Scsi_Host  *shost;
3270         struct lpfc_vport **vports;
3271         int i;
3272
3273         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3274                 return;
3275
3276         /* stop port and all timers associated with this hba */
3277         lpfc_stop_port(phba);
3278
3279         /* Tear down the local and target port registrations.  The
3280          * nvme transports need to clean up.
3281          */
3282         lpfc_nvmet_destroy_targetport(phba);
3283         lpfc_nvme_destroy_localport(phba->pport);
3284
3285         vports = lpfc_create_vport_work_array(phba);
3286         if (vports != NULL)
3287                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3288                         lpfc_stop_vport_timers(vports[i]);
3289         lpfc_destroy_vport_work_array(phba, vports);
3290         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3291                         "0460 Bring Adapter offline\n");
3292         /* Bring down the SLI Layer and clean up. The HBA is offline
3293          * now. */
3294         lpfc_sli_hba_down(phba);
3295         spin_lock_irq(&phba->hbalock);
3296         phba->work_ha = 0;
3297         spin_unlock_irq(&phba->hbalock);
3298         vports = lpfc_create_vport_work_array(phba);
3299         if (vports != NULL)
3300                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3301                         shost = lpfc_shost_from_vport(vports[i]);
3302                         spin_lock_irq(shost->host_lock);
3303                         vports[i]->work_port_events = 0;
3304                         vports[i]->fc_flag |= FC_OFFLINE_MODE;
3305                         spin_unlock_irq(shost->host_lock);
3306                 }
3307         lpfc_destroy_vport_work_array(phba, vports);
3308 }
3309
3310 /**
3311  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3312  * @phba: pointer to lpfc hba data structure.
3313  *
3314  * This routine frees all the SCSI buffers and IOCBs on the driver lists
3315  * back to the kernel. It is called from lpfc_pci_remove_one to free
3316  * the internal resources before the device is removed from the system.
3317  **/
3318 static void
3319 lpfc_scsi_free(struct lpfc_hba *phba)
3320 {
3321         struct lpfc_scsi_buf *sb, *sb_next;
3322
3323         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3324                 return;
3325
3326         spin_lock_irq(&phba->hbalock);
3327
3328         /* Release all the lpfc_scsi_bufs maintained by this host. */
3329
3330         spin_lock(&phba->scsi_buf_list_put_lock);
3331         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3332                                  list) {
3333                 list_del(&sb->list);
3334                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3335                               sb->dma_handle);
3336                 kfree(sb);
3337                 phba->total_scsi_bufs--;
3338         }
3339         spin_unlock(&phba->scsi_buf_list_put_lock);
3340
3341         spin_lock(&phba->scsi_buf_list_get_lock);
3342         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3343                                  list) {
3344                 list_del(&sb->list);
3345                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3346                               sb->dma_handle);
3347                 kfree(sb);
3348                 phba->total_scsi_bufs--;
3349         }
3350         spin_unlock(&phba->scsi_buf_list_get_lock);
3351         spin_unlock_irq(&phba->hbalock);
3352 }
3353 /**
3354  * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
3355  * @phba: pointer to lpfc hba data structure.
3356  *
3357  * This routine frees all the NVME buffers and IOCBs on the driver lists
3358  * back to the kernel. It is called from lpfc_pci_remove_one to free
3359  * the internal resources before the device is removed from the system.
3360  **/
3361 static void
3362 lpfc_nvme_free(struct lpfc_hba *phba)
3363 {
3364         struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
3365
3366         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3367                 return;
3368
3369         spin_lock_irq(&phba->hbalock);
3370
3371         /* Release all the lpfc_nvme_bufs maintained by this host. */
3372         spin_lock(&phba->nvme_buf_list_put_lock);
3373         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3374                                  &phba->lpfc_nvme_buf_list_put, list) {
3375                 list_del(&lpfc_ncmd->list);
3376                 phba->put_nvme_bufs--;
3377                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3378                               lpfc_ncmd->dma_handle);
3379                 kfree(lpfc_ncmd);
3380                 phba->total_nvme_bufs--;
3381         }
3382         spin_unlock(&phba->nvme_buf_list_put_lock);
3383
3384         spin_lock(&phba->nvme_buf_list_get_lock);
3385         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3386                                  &phba->lpfc_nvme_buf_list_get, list) {
3387                 list_del(&lpfc_ncmd->list);
3388                 phba->get_nvme_bufs--;
3389                 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3390                               lpfc_ncmd->dma_handle);
3391                 kfree(lpfc_ncmd);
3392                 phba->total_nvme_bufs--;
3393         }
3394         spin_unlock(&phba->nvme_buf_list_get_lock);
3395         spin_unlock_irq(&phba->hbalock);
3396 }
3397 /**
3398  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
3399  * @phba: pointer to lpfc hba data structure.
3400  *
3401  * This routine first calculates the sizes of the current els and allocated
3402  * scsi sgl lists, and then goes through all sgls to update the physical
3403  * XRIs assigned due to port function reset. During port initialization, the
3404  * current els and allocated scsi sgl lists are empty.
3405  *
3406  * Return codes
3407  *   0 - successful, -ENOMEM - failed to allocate memory
3408  **/
3409 int
3410 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
3411 {
3412         struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3413         uint16_t i, lxri, xri_cnt, els_xri_cnt;
3414         LIST_HEAD(els_sgl_list);
3415         int rc;
3416
3417         /*
3418          * update on pci function's els xri-sgl list
3419          */
3420         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3421
3422         if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3423                 /* els xri-sgl expanded */
3424                 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3425                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3426                                 "3157 ELS xri-sgl count increased from "
3427                                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3428                                 els_xri_cnt);
3429                 /* allocate the additional els sgls */
3430                 for (i = 0; i < xri_cnt; i++) {
3431                         sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3432                                              GFP_KERNEL);
3433                         if (sglq_entry == NULL) {
3434                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3435                                                 "2562 Failure to allocate an "
3436                                                 "ELS sgl entry:%d\n", i);
3437                                 rc = -ENOMEM;
3438                                 goto out_free_mem;
3439                         }
3440                         sglq_entry->buff_type = GEN_BUFF_TYPE;
3441                         sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3442                                                            &sglq_entry->phys);
3443                         if (sglq_entry->virt == NULL) {
3444                                 kfree(sglq_entry);
3445                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3446                                                 "2563 Failure to allocate an "
3447                                                 "ELS mbuf:%d\n", i);
3448                                 rc = -ENOMEM;
3449                                 goto out_free_mem;
3450                         }
3451                         sglq_entry->sgl = sglq_entry->virt;
3452                         memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3453                         sglq_entry->state = SGL_FREED;
3454                         list_add_tail(&sglq_entry->list, &els_sgl_list);
3455                 }
3456                 spin_lock_irq(&phba->hbalock);
3457                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3458                 list_splice_init(&els_sgl_list,
3459                                  &phba->sli4_hba.lpfc_els_sgl_list);
3460                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3461                 spin_unlock_irq(&phba->hbalock);
3462         } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3463                 /* els xri-sgl shrunk */
3464                 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3465                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3466                                 "3158 ELS xri-sgl count decreased from "
3467                                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3468                                 els_xri_cnt);
3469                 spin_lock_irq(&phba->hbalock);
3470                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3471                 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
3472                                  &els_sgl_list);
3473                 /* release extra els sgls from list */
3474                 for (i = 0; i < xri_cnt; i++) {
3475                         list_remove_head(&els_sgl_list,
3476                                          sglq_entry, struct lpfc_sglq, list);
3477                         if (sglq_entry) {
3478                                 __lpfc_mbuf_free(phba, sglq_entry->virt,
3479                                                  sglq_entry->phys);
3480                                 kfree(sglq_entry);
3481                         }
3482                 }
3483                 list_splice_init(&els_sgl_list,
3484                                  &phba->sli4_hba.lpfc_els_sgl_list);
3485                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3486                 spin_unlock_irq(&phba->hbalock);
3487         } else
3488                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3489                                 "3163 ELS xri-sgl count unchanged: %d\n",
3490                                 els_xri_cnt);
3491         phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3492
3493         /* update xris to els sgls on the list */
3494         sglq_entry = NULL;
3495         sglq_entry_next = NULL;
3496         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3497                                  &phba->sli4_hba.lpfc_els_sgl_list, list) {
3498                 lxri = lpfc_sli4_next_xritag(phba);
3499                 if (lxri == NO_XRI) {
3500                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3501                                         "2400 Failed to allocate xri for "
3502                                         "ELS sgl\n");
3503                         rc = -ENOMEM;
3504                         goto out_free_mem;
3505                 }
3506                 sglq_entry->sli4_lxritag = lxri;
3507                 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3508         }
3509         return 0;
3510
3511 out_free_mem:
3512         lpfc_free_els_sgl_list(phba);
3513         return rc;
3514 }
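
/*
 * Editor's note -- illustrative sketch (not part of the driver) of the
 * splice-aside idiom used above for resizing the shared sgl list: move
 * the list onto a private head under both locks, edit the private copy,
 * then publish it back, so readers never observe a half-built list:
 *
 *      LIST_HEAD(tmp_list);
 *
 *      spin_lock_irq(&phba->hbalock);
 *      spin_lock(&phba->sli4_hba.sgl_list_lock);
 *      list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &tmp_list);
 *      // ... add or trim entries on tmp_list ...
 *      list_splice_init(&tmp_list, &phba->sli4_hba.lpfc_els_sgl_list);
 *      spin_unlock(&phba->sli4_hba.sgl_list_lock);
 *      spin_unlock_irq(&phba->hbalock);
 */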
3515
3516 /**
3517  * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
3518  * @phba: pointer to lpfc hba data structure.
3519  *
3520  * This routine first calculates the sizes of the current els and allocated
3521  * nvmet sgl lists, and then goes through all sgls to update the physical
3522  * XRIs assigned due to port function reset. During port initialization, the
3523  * current els and allocated nvmet sgl lists are empty.
3524  *
3525  * Return codes
3526  *   0 - successful, -ENOMEM - failed to allocate memory
3527  **/
3528 int
3529 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
3530 {
3531         struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3532         uint16_t i, lxri, xri_cnt, els_xri_cnt;
3533         uint16_t nvmet_xri_cnt;
3534         LIST_HEAD(nvmet_sgl_list);
3535         int rc;
3536
3537         /*
3538          * update on pci function's nvmet xri-sgl list
3539          */
3540         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3541
3542         /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
3543         nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3544         if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
3545                 /* nvmet xri-sgl expanded */
3546                 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
3547                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3548                                 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
3549                                 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
3550                 /* allocate the additional nvmet sgls */
3551                 for (i = 0; i < xri_cnt; i++) {
3552                         sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3553                                              GFP_KERNEL);
3554                         if (sglq_entry == NULL) {
3555                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3556                                                 "6303 Failure to allocate an "
3557                                                 "NVMET sgl entry:%d\n", i);
3558                                 rc = -ENOMEM;
3559                                 goto out_free_mem;
3560                         }
3561                         sglq_entry->buff_type = NVMET_BUFF_TYPE;
3562                         sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
3563                                                            &sglq_entry->phys);
3564                         if (sglq_entry->virt == NULL) {
3565                                 kfree(sglq_entry);
3566                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3567                                                 "6304 Failure to allocate an "
3568                                                 "NVMET buf:%d\n", i);
3569                                 rc = -ENOMEM;
3570                                 goto out_free_mem;
3571                         }
3572                         sglq_entry->sgl = sglq_entry->virt;
3573                         memset(sglq_entry->sgl, 0,
3574                                phba->cfg_sg_dma_buf_size);
3575                         sglq_entry->state = SGL_FREED;
3576                         list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
3577                 }
3578                 spin_lock_irq(&phba->hbalock);
3579                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3580                 list_splice_init(&nvmet_sgl_list,
3581                                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
3582                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3583                 spin_unlock_irq(&phba->hbalock);
3584         } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
3585                 /* nvmet xri-sgl shrunk */
3586                 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
3587                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3588                                 "6305 NVMET xri-sgl count decreased from "
3589                                 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
3590                                 nvmet_xri_cnt);
3591                 spin_lock_irq(&phba->hbalock);
3592                 spin_lock(&phba->sli4_hba.sgl_list_lock);
3593                 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
3594                                  &nvmet_sgl_list);
3595                 /* release extra nvmet sgls from list */
3596                 for (i = 0; i < xri_cnt; i++) {
3597                         list_remove_head(&nvmet_sgl_list,
3598                                          sglq_entry, struct lpfc_sglq, list);
3599                         if (sglq_entry) {
3600                                 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
3601                                                     sglq_entry->phys);
3602                                 kfree(sglq_entry);
3603                         }
3604                 }
3605                 list_splice_init(&nvmet_sgl_list,
3606                                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
3607                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
3608                 spin_unlock_irq(&phba->hbalock);
3609         } else
3610                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3611                                 "6306 NVMET xri-sgl count unchanged: %d\n",
3612                                 nvmet_xri_cnt);
3613         phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
3614
3615         /* update xris to nvmet sgls on the list */
3616         sglq_entry = NULL;
3617         sglq_entry_next = NULL;
3618         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3619                                  &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
3620                 lxri = lpfc_sli4_next_xritag(phba);
3621                 if (lxri == NO_XRI) {
3622                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3623                                         "6307 Failed to allocate xri for "
3624                                         "NVMET sgl\n");
3625                         rc = -ENOMEM;
3626                         goto out_free_mem;
3627                 }
3628                 sglq_entry->sli4_lxritag = lxri;
3629                 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3630         }
3631         return 0;
3632
3633 out_free_mem:
3634         lpfc_free_nvmet_sgl_list(phba);
3635         return rc;
3636 }
3637
3638 /**
3639  * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
3640  * @phba: pointer to lpfc hba data structure.
3641  *
3642  * This routine first calculates the sizes of the current els and allocated
3643  * scsi sgl lists, and then goes through all sgls to update the physical
3644  * XRIs assigned due to port function reset. During port initialization, the
3645  * current els and allocated scsi sgl lists are empty.
3646  *
3647  * Return codes
3648  *   0 - successful, -ENOMEM - failed to allocate memory
3649  **/
3650 int
3651 lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
3652 {
3653         struct lpfc_scsi_buf *psb, *psb_next;
3654         uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
3655         LIST_HEAD(scsi_sgl_list);
3656         int rc;
3657
3658         /*
3659          * update on pci function's els xri-sgl list
3660          */
3661         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3662         phba->total_scsi_bufs = 0;
3663
3664         /*
3665          * update on pci function's allocated scsi xri-sgl list
3666          */
3667         /* maximum number of xris available for scsi buffers */
3668         phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3669                                       els_xri_cnt;
3670
3671         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3672                 return 0;
3673
3674         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3675                 phba->sli4_hba.scsi_xri_max =  /* Split them up */
3676                         (phba->sli4_hba.scsi_xri_max *
3677                          phba->cfg_xri_split) / 100;
3678
3679         spin_lock_irq(&phba->scsi_buf_list_get_lock);
3680         spin_lock(&phba->scsi_buf_list_put_lock);
3681         list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3682         list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3683         spin_unlock(&phba->scsi_buf_list_put_lock);
3684         spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3685
3686         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3687                         "6060 Current allocated SCSI xri-sgl count:%d, "
3688                         "maximum  SCSI xri count:%d (split:%d)\n",
3689                         phba->sli4_hba.scsi_xri_cnt,
3690                         phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
3691
3692         if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3693                 /* max scsi xri shrunk below the allocated scsi buffers */
3694                 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3695                                         phba->sli4_hba.scsi_xri_max;
3696                 /* release the extra allocated scsi buffers */
3697                 for (i = 0; i < scsi_xri_cnt; i++) {
3698                         list_remove_head(&scsi_sgl_list, psb,
3699                                          struct lpfc_scsi_buf, list);
3700                         if (psb) {
3701                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3702                                               psb->data, psb->dma_handle);
3703                                 kfree(psb);
3704                         }
3705                 }
3706                 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3707                 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3708                 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3709         }
3710
3711         /* update xris associated with the remaining allocated scsi buffers */
3712         psb = NULL;
3713         psb_next = NULL;
3714         list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3715                 lxri = lpfc_sli4_next_xritag(phba);
3716                 if (lxri == NO_XRI) {
3717                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3718                                         "2560 Failed to allocate xri for "
3719                                         "scsi buffer\n");
3720                         rc = -ENOMEM;
3721                         goto out_free_mem;
3722                 }
3723                 psb->cur_iocbq.sli4_lxritag = lxri;
3724                 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3725         }
3726         spin_lock_irq(&phba->scsi_buf_list_get_lock);
3727         spin_lock(&phba->scsi_buf_list_put_lock);
3728         list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3729         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3730         spin_unlock(&phba->scsi_buf_list_put_lock);
3731         spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3732         return 0;
3733
3734 out_free_mem:
3735         lpfc_scsi_free(phba);
3736         return rc;
3737 }
3738
3739 static uint64_t
3740 lpfc_get_wwpn(struct lpfc_hba *phba)
3741 {
3742         uint64_t wwn;
3743         int rc;
3744         LPFC_MBOXQ_t *mboxq;
3745         MAILBOX_t *mb;
3746
3747         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
3748                                                 GFP_KERNEL);
3749         if (!mboxq)
3750                 return (uint64_t)-1;
3751
3752         /* First get WWN of HBA instance */
3753         lpfc_read_nv(phba, mboxq);
3754         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
3755         if (rc != MBX_SUCCESS) {
3756                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3757                                 "6019 Mailbox failed, mbxCmd x%x "
3758                                 "READ_NV, mbxStatus x%x\n",
3759                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
3760                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
3761                 mempool_free(mboxq, phba->mbox_mem_pool);
3762                 return (uint64_t)-1;
3763         }
3764         mb = &mboxq->u.mb;
3765         memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
3766         /* wwn is WWPN of HBA instance */
3767         mempool_free(mboxq, phba->mbox_mem_pool);
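        /*
         * Decode per SLI revision: SLI4 returns the portname big-endian,
         * while SLI3 delivers it as two 32-bit words that must be swapped.
         */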
3768         if (phba->sli_rev == LPFC_SLI_REV4)
3769                 return be64_to_cpu(wwn);
3770         else
3771                 return rol64(wwn, 32);
3772 }
3773
3774 /**
3775  * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
3776  * @phba: pointer to lpfc hba data structure.
3777  *
3778  * This routine first calculates the sizes of the current els and allocated
3779  * nvme sgl lists, and then goes through all sgls to update the physical
3780  * XRIs assigned due to port function reset. During port initialization, the
3781  * current els and allocated nvme sgl lists are empty.
3782  *
3783  * Return codes
3784  *   0 - successful; -ENOMEM - failed to allocate an xri for a buffer
3785  **/
3786 int
3787 lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
3788 {
3789         struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
3790         uint16_t i, lxri, els_xri_cnt;
3791         uint16_t nvme_xri_cnt, nvme_xri_max;
3792         LIST_HEAD(nvme_sgl_list);
3793         int rc, cnt;
3794
3795         phba->total_nvme_bufs = 0;
3796         phba->get_nvme_bufs = 0;
3797         phba->put_nvme_bufs = 0;
3798
3799         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3800                 return 0;
3801         /*
3802          * update on pci function's allocated nvme xri-sgl list
3803          */
3804
3805         /* maximum number of xris available for nvme buffers */
3806         els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3807         nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3808         phba->sli4_hba.nvme_xri_max = nvme_xri_max;
3809         phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
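        /*
         * NVME gets whatever XRIs remain after the ELS and SCSI pools have
         * taken their shares of the port's max_xri.
         */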
3810
3811         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3812                         "6074 Current allocated NVME xri-sgl count:%d, "
3813                         "maximum NVME xri count:%d\n",
3814                         phba->sli4_hba.nvme_xri_cnt,
3815                         phba->sli4_hba.nvme_xri_max);
3816
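        /*
         * Drain both the get and put buffer lists onto a local list. The
         * get-list lock is the outer, irq-disabling lock and the put-list
         * lock nests inside it, the same ordering used for the SCSI lists
         * above.
         */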
3817         spin_lock_irq(&phba->nvme_buf_list_get_lock);
3818         spin_lock(&phba->nvme_buf_list_put_lock);
3819         list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
3820         list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
3821         cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
3822         phba->get_nvme_bufs = 0;
3823         phba->put_nvme_bufs = 0;
3824         spin_unlock(&phba->nvme_buf_list_put_lock);
3825         spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3826
3827         if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
3828                 /* max nvme xri shrunk below the allocated nvme buffers */
3829                 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3830                 nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
3831                                         phba->sli4_hba.nvme_xri_max;
3832                 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3833                 /* release the extra allocated nvme buffers */
3834                 for (i = 0; i < nvme_xri_cnt; i++) {
3835                         list_remove_head(&nvme_sgl_list, lpfc_ncmd,
3836                                          struct lpfc_nvme_buf, list);
3837                         if (lpfc_ncmd) {
3838                                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3839                                               lpfc_ncmd->data,
3840                                               lpfc_ncmd->dma_handle);
3841                                 kfree(lpfc_ncmd);
3842                         }
3843                 }
3844                 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3845                 phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
3846                 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3847         }
3848
3849         /* update xris associated with remaining allocated nvme buffers */
3850         lpfc_ncmd = NULL;
3851         lpfc_ncmd_next = NULL;
3852         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3853                                  &nvme_sgl_list, list) {
3854                 lxri = lpfc_sli4_next_xritag(phba);
3855                 if (lxri == NO_XRI) {
3856                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3857                                         "6075 Failed to allocate xri for "
3858                                         "nvme buffer\n");
3859                         rc = -ENOMEM;
3860                         goto out_free_mem;
3861                 }
3862                 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
3863                 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3864         }
3865         spin_lock_irq(&phba->nvme_buf_list_get_lock);
3866         spin_lock(&phba->nvme_buf_list_put_lock);
3867         list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
3868         phba->get_nvme_bufs = cnt;
3869         INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
3870         spin_unlock(&phba->nvme_buf_list_put_lock);
3871         spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3872         return 0;
3873
3874 out_free_mem:
3875         lpfc_nvme_free(phba);
3876         return rc;
3877 }
3878
3879 /**
3880  * lpfc_create_port - Create an FC port
3881  * @phba: pointer to lpfc hba data structure.
3882  * @instance: a unique integer ID to this FC port.
3883  * @dev: pointer to the device data structure.
3884  *
3885  * This routine creates an FC port for the upper layer protocol. The FC port
3886  * can be created on top of either a physical port or a virtual port provided
3887  * by the HBA. This routine also allocates a SCSI host data structure (shost)
3888  * and associates the FC port created before adding the shost into the SCSI
3889  * layer.
3890  *
3891  * Return codes
3892  *   vport - pointer to the virtual N_Port data structure.
3893  *   NULL - port create failed.
3894  **/
3895 struct lpfc_vport *
3896 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3897 {
3898         struct lpfc_vport *vport;
3899         struct Scsi_Host  *shost = NULL;
3900         int error = 0;
3901         int i;
3902         uint64_t wwn = 0; /* quiet "may be used uninitialized" warnings */
3903         bool use_no_reset_hba = false;
3904         int rc;
3905
3906         if (lpfc_no_hba_reset_cnt) {
3907                 if (phba->sli_rev < LPFC_SLI_REV4 &&
3908                     dev == &phba->pcidev->dev) {
3909                         /* Reset the port first */
3910                         lpfc_sli_brdrestart(phba);
3911                         rc = lpfc_sli_chipset_init(phba);
3912                         if (rc)
3913                                 return NULL;
3914                 }
3915                 wwn = lpfc_get_wwpn(phba);
3916         }
3917
3918         for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
3919                 if (wwn == lpfc_no_hba_reset[i]) {
3920                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3921                                         "6020 Setting use_no_reset port=%llx\n",
3922                                         wwn);
3923                         use_no_reset_hba = true;
3924                         break;
3925                 }
3926         }
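        /*
         * A WWPN listed in the lpfc_no_hba_reset module parameter selects
         * the no-host-reset SCSI host template (lpfc_template_no_hr) below;
         * the intent (an assumption from the template name) is that SCSI EH
         * never performs a host reset on such ports.
         */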
3927
3928         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
3929                 if (dev != &phba->pcidev->dev) {
3930                         shost = scsi_host_alloc(&lpfc_vport_template,
3931                                                 sizeof(struct lpfc_vport));
3932                 } else {
3933                         if (!use_no_reset_hba)
3934                                 shost = scsi_host_alloc(&lpfc_template,
3935                                                 sizeof(struct lpfc_vport));
3936                         else
3937                                 shost = scsi_host_alloc(&lpfc_template_no_hr,
3938                                                 sizeof(struct lpfc_vport));
3939                 }
3940         } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
3941                 shost = scsi_host_alloc(&lpfc_template_nvme,
3942                                         sizeof(struct lpfc_vport));
3943         }
3944         if (!shost)
3945                 goto out;
3946
3947         vport = (struct lpfc_vport *) shost->hostdata;
3948         vport->phba = phba;
3949         vport->load_flag |= FC_LOADING;
3950         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3951         vport->fc_rscn_flush = 0;
3952         lpfc_get_vport_cfgparam(vport);
3953
3954         shost->unique_id = instance;
3955         shost->max_id = LPFC_MAX_TARGET;
3956         shost->max_lun = vport->cfg_max_luns;
3957         shost->this_id = -1;
3958         shost->max_cmd_len = 16;
3959         shost->nr_hw_queues = phba->cfg_fcp_io_channel;
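        /*
         * SLI4 SGEs cap how much data one scatter/gather element may carry,
         * so bound DMA segments by the largest supported SGE length.
         */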
3960         if (phba->sli_rev == LPFC_SLI_REV4) {
3961                 shost->dma_boundary =
3962                         phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3963                 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
3964         }
3965
3966         /*
3967          * Set initial can_queue value since 0 is no longer supported and
3968          * scsi_add_host will fail. This will be adjusted later based on the
3969          * max xri value determined in hba setup.
3970          */
3971         shost->can_queue = phba->cfg_hba_queue_depth - 10;
3972         if (dev != &phba->pcidev->dev) {
3973                 shost->transportt = lpfc_vport_transport_template;
3974                 vport->port_type = LPFC_NPIV_PORT;
3975         } else {
3976                 shost->transportt = lpfc_transport_template;
3977                 vport->port_type = LPFC_PHYSICAL_PORT;
3978         }
3979
3980         /* Initialize all internally managed lists. */
3981         INIT_LIST_HEAD(&vport->fc_nodes);
3982         INIT_LIST_HEAD(&vport->rcv_buffer_list);
3983         spin_lock_init(&vport->work_port_lock);
3984
3985         timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
3986
3987         timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
3988
3989         timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
3990
3991         error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3992         if (error)
3993                 goto out_put_shost;
3994
3995         spin_lock_irq(&phba->port_list_lock);
3996         list_add_tail(&vport->listentry, &phba->port_list);
3997         spin_unlock_irq(&phba->port_list_lock);
3998         return vport;
3999
4000 out_put_shost:
4001         scsi_host_put(shost);
4002 out:
4003         return NULL;
4004 }
4005
4006 /**
4007  * destroy_port -  destroy an FC port
4008  * @vport: pointer to an lpfc virtual N_Port data structure.
4009  *
4010  * This routine destroys an FC port from the upper layer protocol. All the
4011  * resources associated with the port are released.
4012  **/
4013 void
4014 destroy_port(struct lpfc_vport *vport)
4015 {
4016         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4017         struct lpfc_hba  *phba = vport->phba;
4018
4019         lpfc_debugfs_terminate(vport);
4020         fc_remove_host(shost);
4021         scsi_remove_host(shost);
4022
4023         spin_lock_irq(&phba->port_list_lock);
4024         list_del_init(&vport->listentry);
4025         spin_unlock_irq(&phba->port_list_lock);
4026
4027         lpfc_cleanup(vport);
4028         return;
4029 }
4030
4031 /**
4032  * lpfc_get_instance - Get a unique integer ID
4033  *
4034  * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
4035  * uses the kernel idr facility to perform the task.
4036  *
4037  * Return codes:
4038  *   instance - a unique integer ID allocated as the new instance.
4039  *   -1 - lpfc get instance failed.
4040  **/
4041 int
4042 lpfc_get_instance(void)
4043 {
4044         int ret;
4045
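        /*
         * idr_alloc() with end == 0 places no upper bound on the ID space;
         * it returns the lowest available ID at or above the start value.
         */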
4046         ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4047         return ret < 0 ? -1 : ret;
4048 }
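
/*
 * Illustrative sketch only (not code from this file): a probe path would
 * typically pair lpfc_get_instance() with lpfc_create_port(), e.g.:
 *
 *	instance = lpfc_get_instance();
 *	if (instance == -1)
 *		return -ENOMEM;
 *	vport = lpfc_create_port(phba, instance, &phba->pcidev->dev);
 *	if (!vport)
 *		return -ENODEV;
 */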
4049
4050 /**
4051  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4052  * @shost: pointer to SCSI host data structure.
4053  * @time: elapsed time of the scan in jiffies.
4054  *
4055  * This routine is called by the SCSI layer with a SCSI host to determine
4056  * whether the host scan has finished.
4057  *
4058  * Note: there is no scan_start function as adapter initialization will have
4059  * asynchronously kicked off the link initialization.
4060  *
4061  * Return codes
4062  *   0 - SCSI host scan is not over yet.
4063  *   1 - SCSI host scan is over.
4064  **/
4065 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4066 {
4067         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4068         struct lpfc_hba   *phba = vport->phba;
4069         int stat = 0;
4070
4071         spin_lock_irq(shost->host_lock);
4072
4073         if (vport->load_flag & FC_UNLOADING) {
4074                 stat = 1;
4075                 goto finished;
4076         }
4077         if (time >= msecs_to_jiffies(30 * 1000)) {
4078                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4079                                 "0461 Scanning longer than 30 "
4080                                 "seconds.  Continuing initialization\n");
4081                 stat = 1;
4082                 goto finished;
4083         }
4084         if (time >= msecs_to_jiffies(15 * 1000) &&
4085             phba->link_state <= LPFC_LINK_DOWN) {
4086                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4087                                 "0465 Link down longer than 15 "
4088                                 "seconds.  Continuing initialization\n");
4089                 stat = 1;
4090                 goto finished;
4091         }
4092
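        /*
         * Report the scan complete only once the vport is ready, discovery
         * and PRLI exchanges have drained, mapped targets have had a moment
         * to appear, and no mailbox command is still active.
         */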
4093         if (vport->port_state != LPFC_VPORT_READY)
4094                 goto finished;
4095         if (vport->num_disc_nodes || vport->fc_prli_sent)
4096                 goto finished;
4097         if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4098                 goto finished;
4099         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4100                 goto finished;
4101
4102         stat = 1;
4103
4104 finished:
4105         spin_unlock_irq(shost->host_lock);
4106         return stat;
4107 }
4108
4109 /**
4110  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4111  * @shost: pointer to SCSI host data structure.
4112  *
4113  * This routine initializes the SCSI host attributes of a given FC port. The
4114  * SCSI host can sit on top of either a physical port or a virtual port.
4115  **/
4116 void lpfc_host_attrib_init(struct Scsi_Host *shost)
4117 {
4118         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4119         struct lpfc_hba   *phba = vport->phba;
4120         /*
4121          * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
4122          */
4123
4124         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4125         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4126         fc_host_supported_classes(shost) = FC_COS_CLASS3;
4127
4128         memset(fc_host_supported_fc4s(shost), 0,
4129                sizeof(fc_host_supported_fc4s(shost)));
4130         fc_host_supported_fc4s(shost)[2] = 1;
4131         fc_host_supported_fc4s(shost)[7] = 1;
4132
4133         lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
4134                                  sizeof(fc_host_symbolic_name(shost)));
4135
4136         fc_host_supported_speeds(shost) = 0;
4137         if (phba->lmt & LMT_64Gb)
4138                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4139         if (phba->lmt & LMT_32Gb)
4140                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4141         if (phba->lmt & LMT_16Gb)
4142                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4143         if (phba->lmt & LMT_10Gb)
4144                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4145         if (phba->lmt & LMT_8Gb)
4146                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4147         if (phba->lmt & LMT_4Gb)
4148                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4149         if (phba->lmt & LMT_2Gb)
4150                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4151         if (phba->lmt & LMT_1Gb)
4152                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4153
4154         fc_host_maxframe_size(shost) =
4155                 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
4156                 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
4157
4158         fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
4159
4160         /* This value is also unchanging */
4161         memset(fc_host_active_fc4s(shost), 0,
4162                sizeof(fc_host_active_fc4s(shost)));
4163         fc_host_active_fc4s(shost)[2] = 1;
4164         fc_host_active_fc4s(shost)[7] = 1;
4165
4166         fc_host_max_npiv_vports(shost) = phba->max_vpi;
4167         spin_lock_irq(shost->host_lock);
4168         vport->load_flag &= ~FC_LOADING;
4169         spin_unlock_irq(shost->host_lock);
4170 }
4171
4172 /**
4173  * lpfc_stop_port_s3 - Stop SLI3 device port
4174  * @phba: pointer to lpfc hba data structure.
4175  *
4176  * This routine is invoked to stop an SLI3 device port, it stops the device
4177  * from generating interrupts and stops the device driver's timers for the
4178  * device.
4179  **/
4180 static void
4181 lpfc_stop_port_s3(struct lpfc_hba *phba)
4182 {
4183         /* Clear all interrupt enable conditions */
4184         writel(0, phba->HCregaddr);
4185         readl(phba->HCregaddr); /* flush */
4186         /* Clear all pending interrupts */
4187         writel(0xffffffff, phba->HAregaddr);
4188         readl(phba->HAregaddr); /* flush */
4189
4190         /* Reset some HBA SLI setup states */
4191         lpfc_stop_hba_timers(phba);
4192         phba->pport->work_port_events = 0;
4193 }
4194
4195 /**
4196  * lpfc_stop_port_s4 - Stop SLI4 device port
4197  * @phba: pointer to lpfc hba data structure.
4198  *
4199  * This routine is invoked to stop an SLI4 device port, it stops the device
4200  * from generating interrupts and stops the device driver's timers for the
4201  * device.
4202  **/
4203 static void
4204 lpfc_stop_port_s4(struct lpfc_hba *phba)
4205 {
4206         /* Reset some HBA SLI4 setup states */
4207         lpfc_stop_hba_timers(phba);
4208         phba->pport->work_port_events = 0;
4209         phba->sli4_hba.intr_enable = 0;
4210 }
4211
4212 /**
4213  * lpfc_stop_port - Wrapper function for stopping hba port
4214  * @phba: Pointer to HBA context object.
4215  *
4216  * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
4217  * the API jump table function pointer from the lpfc_hba struct.
4218  **/
4219 void
4220 lpfc_stop_port(struct lpfc_hba *phba)
4221 {
4222         phba->lpfc_stop_port(phba);
4223
4224         if (phba->wq)
4225                 flush_workqueue(phba->wq);
4226 }
4227
4228 /**
4229  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
4230  * @phba: Pointer to hba for which this call is being executed.
4231  *
4232  * This routine starts the timer waiting for the FCF rediscovery to complete.
4233  **/
4234 void
4235 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
4236 {
4237         unsigned long fcf_redisc_wait_tmo =
4238                 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
4239         /* Start fcf rediscovery wait period timer */
4240         mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
4241         spin_lock_irq(&phba->hbalock);
4242         /* Allow action to new fcf asynchronous event */
4243         phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
4244         /* Mark the FCF rediscovery pending state */
4245         phba->fcf.fcf_flag |= FCF_REDISC_PEND;
4246         spin_unlock_irq(&phba->hbalock);
4247 }
4248
4249 /**
4250  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4251  * @t: pointer to the timer_list embedded in the lpfc hba data structure.
4252  *
4253  * This routine is invoked when the wait for FCF table rediscovery has
4254  * timed out. If new FCF records have been discovered during the wait
4255  * period, a new FCF event is added to the FCoE async event list and the
4256  * worker thread is woken up to process it from the worker thread
4257  * context.
4258  **/
4259 static void
4260 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4261 {
4262         struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
4263
4264         /* Don't send FCF rediscovery event if timer cancelled */
4265         spin_lock_irq(&phba->hbalock);
4266         if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
4267                 spin_unlock_irq(&phba->hbalock);
4268                 return;
4269         }
4270         /* Clear FCF rediscovery timer pending flag */
4271         phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
4272         /* FCF rediscovery event to worker thread */
4273         phba->fcf.fcf_flag |= FCF_REDISC_EVT;
4274         spin_unlock_irq(&phba->hbalock);
4275         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
4276                         "2776 FCF rediscover quiescent timer expired\n");
4277         /* wake up worker thread */
4278         lpfc_worker_wake_up(phba);
4279 }
4280
4281 /**
4282  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
4283  * @phba: pointer to lpfc hba data structure.
4284  * @acqe_link: pointer to the async link completion queue entry.
4285  *
4286  * This routine is to parse the SLI4 link-attention link fault code.
4287  **/
4288 static void
4289 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4290                            struct lpfc_acqe_link *acqe_link)
4291 {
4292         switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4293         case LPFC_ASYNC_LINK_FAULT_NONE:
4294         case LPFC_ASYNC_LINK_FAULT_LOCAL:
4295         case LPFC_ASYNC_LINK_FAULT_REMOTE:
4296         case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4297                 break;
4298         default:
4299                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4300                                 "0398 Unknown link fault code: x%x\n",
4301                                 bf_get(lpfc_acqe_link_fault, acqe_link));
4302                 break;
4303         }
4304 }
4305
4306 /**
4307  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
4308  * @phba: pointer to lpfc hba data structure.
4309  * @acqe_link: pointer to the async link completion queue entry.
4310  *
4311  * This routine is to parse the SLI4 link attention type and translate it
4312  * into the base driver's link attention type coding.
4313  *
4314  * Return: Link attention type in terms of base driver's coding.
4315  **/
4316 static uint8_t
4317 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
4318                           struct lpfc_acqe_link *acqe_link)
4319 {
4320         uint8_t att_type;
4321
4322         switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
4323         case LPFC_ASYNC_LINK_STATUS_DOWN:
4324         case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
4325                 att_type = LPFC_ATT_LINK_DOWN;
4326                 break;
4327         case LPFC_ASYNC_LINK_STATUS_UP:
4328                 /* Ignore physical link up events - wait for logical link up */
4329                 att_type = LPFC_ATT_RESERVED;
4330                 break;
4331         case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
4332                 att_type = LPFC_ATT_LINK_UP;
4333                 break;
4334         default:
4335                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4336                                 "0399 Invalid link attention type: x%x\n",
4337                                 bf_get(lpfc_acqe_link_status, acqe_link));
4338                 att_type = LPFC_ATT_RESERVED;
4339                 break;
4340         }
4341         return att_type;
4342 }
4343
4344 /**
4345  * lpfc_sli_port_speed_get - Get the FC port's current link speed
4346  * @phba: pointer to lpfc hba data structure.
4347  *
4348  * This routine is to get an FC port's current link speed in Mbps.
4349  *
4350  * Return: link speed in terms of Mbps.
4351  **/
4352 uint32_t
4353 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
4354 {
4355         uint32_t link_speed;
4356
4357         if (!lpfc_is_link_up(phba))
4358                 return 0;
4359
4360         if (phba->sli_rev <= LPFC_SLI_REV3) {
4361                 switch (phba->fc_linkspeed) {
4362                 case LPFC_LINK_SPEED_1GHZ:
4363                         link_speed = 1000;
4364                         break;
4365                 case LPFC_LINK_SPEED_2GHZ:
4366                         link_speed = 2000;
4367                         break;
4368                 case LPFC_LINK_SPEED_4GHZ:
4369                         link_speed = 4000;
4370                         break;
4371                 case LPFC_LINK_SPEED_8GHZ:
4372                         link_speed = 8000;
4373                         break;
4374                 case LPFC_LINK_SPEED_10GHZ:
4375                         link_speed = 10000;
4376                         break;
4377                 case LPFC_LINK_SPEED_16GHZ:
4378                         link_speed = 16000;
4379                         break;
4380                 default:
4381                         link_speed = 0;
4382                 }
4383         } else {
4384                 if (phba->sli4_hba.link_state.logical_speed)
4385                         link_speed =
4386                               phba->sli4_hba.link_state.logical_speed;
4387                 else
4388                         link_speed = phba->sli4_hba.link_state.speed;
4389         }
4390         return link_speed;
4391 }
4392
4393 /**
4394  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
4395  * @phba: pointer to lpfc hba data structure.
4396  * @evt_code: asynchronous event code.
4397  * @speed_code: asynchronous event link speed code.
4398  *
4399  * This routine is to parse the given SLI4 async event link speed code into
4400  * a link speed value in Mbps.
4401  *
4402  * Return: link speed in terms of Mbps.
4403  **/
4404 static uint32_t
4405 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
4406                            uint8_t speed_code)
4407 {
4408         uint32_t port_speed;
4409
4410         switch (evt_code) {
4411         case LPFC_TRAILER_CODE_LINK:
4412                 switch (speed_code) {
4413                 case LPFC_ASYNC_LINK_SPEED_ZERO:
4414                         port_speed = 0;
4415                         break;
4416                 case LPFC_ASYNC_LINK_SPEED_10MBPS:
4417                         port_speed = 10;
4418                         break;
4419                 case LPFC_ASYNC_LINK_SPEED_100MBPS:
4420                         port_speed = 100;
4421                         break;
4422                 case LPFC_ASYNC_LINK_SPEED_1GBPS:
4423                         port_speed = 1000;
4424                         break;
4425                 case LPFC_ASYNC_LINK_SPEED_10GBPS:
4426                         port_speed = 10000;
4427                         break;
4428                 case LPFC_ASYNC_LINK_SPEED_20GBPS:
4429                         port_speed = 20000;
4430                         break;
4431                 case LPFC_ASYNC_LINK_SPEED_25GBPS:
4432                         port_speed = 25000;
4433                         break;
4434                 case LPFC_ASYNC_LINK_SPEED_40GBPS:
4435                         port_speed = 40000;
4436                         break;
4437                 default:
4438                         port_speed = 0;
4439                 }
4440                 break;
4441         case LPFC_TRAILER_CODE_FC:
4442                 switch (speed_code) {
4443                 case LPFC_FC_LA_SPEED_UNKNOWN:
4444                         port_speed = 0;
4445                         break;
4446                 case LPFC_FC_LA_SPEED_1G:
4447                         port_speed = 1000;
4448                         break;
4449                 case LPFC_FC_LA_SPEED_2G:
4450                         port_speed = 2000;
4451                         break;
4452                 case LPFC_FC_LA_SPEED_4G:
4453                         port_speed = 4000;
4454                         break;
4455                 case LPFC_FC_LA_SPEED_8G:
4456                         port_speed = 8000;
4457                         break;
4458                 case LPFC_FC_LA_SPEED_10G:
4459                         port_speed = 10000;
4460                         break;
4461                 case LPFC_FC_LA_SPEED_16G:
4462                         port_speed = 16000;
4463                         break;
4464                 case LPFC_FC_LA_SPEED_32G:
4465                         port_speed = 32000;
4466                         break;
4467                 case LPFC_FC_LA_SPEED_64G:
4468                         port_speed = 64000;
4469                         break;
4470                 default:
4471                         port_speed = 0;
4472                 }
4473                 break;
4474         default:
4475                 port_speed = 0;
4476         }
4477         return port_speed;
4478 }
4479
4480 /**
4481  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
4482  * @phba: pointer to lpfc hba data structure.
4483  * @acqe_link: pointer to the async link completion queue entry.
4484  *
4485  * This routine is to handle the SLI4 asynchronous FCoE link event.
4486  **/
4487 static void
4488 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4489                          struct lpfc_acqe_link *acqe_link)
4490 {
4491         struct lpfc_dmabuf *mp;
4492         LPFC_MBOXQ_t *pmb;
4493         MAILBOX_t *mb;
4494         struct lpfc_mbx_read_top *la;
4495         uint8_t att_type;
4496         int rc;
4497
4498         att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
4499         if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
4500                 return;
4501         phba->fcoe_eventtag = acqe_link->event_tag;
4502         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4503         if (!pmb) {
4504                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4505                                 "0395 The mboxq allocation failed\n");
4506                 return;
4507         }
4508         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4509         if (!mp) {
4510                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4511                                 "0396 The lpfc_dmabuf allocation failed\n");
4512                 goto out_free_pmb;
4513         }
4514         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4515         if (!mp->virt) {
4516                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4517                                 "0397 The mbuf allocation failed\n");
4518                 goto out_free_dmabuf;
4519         }
4520
4521         /* Cleanup any outstanding ELS commands */
4522         lpfc_els_flush_all_cmd(phba);
4523
4524         /* Block ELS IOCBs until we are done processing the link event */
4525         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4526
4527         /* Update link event statistics */
4528         phba->sli.slistat.link_event++;
4529
4530         /* Create lpfc_handle_latt mailbox command from link ACQE */
4531         lpfc_read_topology(phba, pmb, mp);
4532         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4533         pmb->vport = phba->pport;
4534
4535         /* Keep the link status for extra SLI4 state machine reference */
4536         phba->sli4_hba.link_state.speed =
4537                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
4538                                 bf_get(lpfc_acqe_link_speed, acqe_link));
4539         phba->sli4_hba.link_state.duplex =
4540                                 bf_get(lpfc_acqe_link_duplex, acqe_link);
4541         phba->sli4_hba.link_state.status =
4542                                 bf_get(lpfc_acqe_link_status, acqe_link);
4543         phba->sli4_hba.link_state.type =
4544                                 bf_get(lpfc_acqe_link_type, acqe_link);
4545         phba->sli4_hba.link_state.number =
4546                                 bf_get(lpfc_acqe_link_number, acqe_link);
4547         phba->sli4_hba.link_state.fault =
4548                                 bf_get(lpfc_acqe_link_fault, acqe_link);
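        /* The ACQE reports logical link speed in units of 10 Mbps */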
4549         phba->sli4_hba.link_state.logical_speed =
4550                         bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
4551
4552         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4553                         "2900 Async FC/FCoE Link event - Speed:%dGBit "
4554                         "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
4555                         "Logical speed:%dMbps Fault:%d\n",
4556                         phba->sli4_hba.link_state.speed,
4557                         phba->sli4_hba.link_state.duplex,
4558                         phba->sli4_hba.link_state.status,
4559                         phba->sli4_hba.link_state.type,
4560                         phba->sli4_hba.link_state.number,
4561                         phba->sli4_hba.link_state.logical_speed,
4562                         phba->sli4_hba.link_state.fault);
4563         /*
4564          * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
4565          * topology info. Note: Optional for non FC-AL ports.
4566          */
4567         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
4568                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4569                 if (rc == MBX_NOT_FINISHED)
4570                         goto out_free_dmabuf;
4571                 return;
4572         }
4573         /*
4574          * For FCoE Mode: fill in all the topology information we need and call
4575          * the READ_TOPOLOGY completion routine to continue without actually
4576          * sending the READ_TOPOLOGY mailbox command to the port.
4577          */
4578         /* Initialize completion status */
4579         mb = &pmb->u.mb;
4580         mb->mbxStatus = MBX_SUCCESS;
4581
4582         /* Parse port fault information field */
4583         lpfc_sli4_parse_latt_fault(phba, acqe_link);
4584
4585         /* Parse and translate link attention fields */
4586         la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4587         la->eventTag = acqe_link->event_tag;
4588         bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4589         bf_set(lpfc_mbx_read_top_link_spd, la,
4590                (bf_get(lpfc_acqe_link_speed, acqe_link)));
4591
4592         /* Fake the following irrelevant fields */
4593         bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4594         bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4595         bf_set(lpfc_mbx_read_top_il, la, 0);
4596         bf_set(lpfc_mbx_read_top_pb, la, 0);
4597         bf_set(lpfc_mbx_read_top_fa, la, 0);
4598         bf_set(lpfc_mbx_read_top_mm, la, 0);
4599
4600         /* Invoke the lpfc_handle_latt mailbox command callback function */
4601         lpfc_mbx_cmpl_read_topology(phba, pmb);
4602
4603         return;
4604
4605 out_free_dmabuf:
4606         kfree(mp);
4607 out_free_pmb:
4608         mempool_free(pmb, phba->mbox_mem_pool);
4609 }
4610
4611 /**
4612  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
4613  * @phba: pointer to lpfc hba data structure.
4614  * @acqe_fc: pointer to the async fc completion queue entry.
4615  *
4616  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
4617  * that the event was received and then issue a read_topology mailbox command so
4618  * that the rest of the driver will treat it the same as SLI3.
4619  **/
4620 static void
4621 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4622 {
4623         struct lpfc_dmabuf *mp;
4624         LPFC_MBOXQ_t *pmb;
4625         MAILBOX_t *mb;
4626         struct lpfc_mbx_read_top *la;
4627         int rc;
4628
4629         if (bf_get(lpfc_trailer_type, acqe_fc) !=
4630             LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
4631                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4632                                 "2895 Non-FC link event detected (%d)\n",
4633                                 bf_get(lpfc_trailer_type, acqe_fc));
4634                 return;
4635         }
4636         /* Keep the link status for extra SLI4 state machine reference */
4637         phba->sli4_hba.link_state.speed =
4638                         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
4639                                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
4640         phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
4641         phba->sli4_hba.link_state.topology =
4642                                 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
4643         phba->sli4_hba.link_state.status =
4644                                 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
4645         phba->sli4_hba.link_state.type =
4646                                 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
4647         phba->sli4_hba.link_state.number =
4648                                 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
4649         phba->sli4_hba.link_state.fault =
4650                                 bf_get(lpfc_acqe_link_fault, acqe_fc);
4651         phba->sli4_hba.link_state.logical_speed =
4652                                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
4653         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4654                         "2896 Async FC event - Speed:%dGBaud Topology:x%x "
4655                         "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
4656                         "%dMbps Fault:%d\n",
4657                         phba->sli4_hba.link_state.speed,
4658                         phba->sli4_hba.link_state.topology,
4659                         phba->sli4_hba.link_state.status,
4660                         phba->sli4_hba.link_state.type,
4661                         phba->sli4_hba.link_state.number,
4662                         phba->sli4_hba.link_state.logical_speed,
4663                         phba->sli4_hba.link_state.fault);
4664         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4665         if (!pmb) {
4666                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4667                                 "2897 The mboxq allocation failed\n");
4668                 return;
4669         }
4670         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4671         if (!mp) {
4672                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4673                                 "2898 The lpfc_dmabuf allocation failed\n");
4674                 goto out_free_pmb;
4675         }
4676         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4677         if (!mp->virt) {
4678                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4679                                 "2899 The mbuf allocation failed\n");
4680                 goto out_free_dmabuf;
4681         }
4682
4683         /* Cleanup any outstanding ELS commands */
4684         lpfc_els_flush_all_cmd(phba);
4685
4686         /* Block ELS IOCBs until we are done processing the link event */
4687         phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
4688
4689         /* Update link event statistics */
4690         phba->sli.slistat.link_event++;
4691
4692         /* Create lpfc_handle_latt mailbox command from link ACQE */
4693         lpfc_read_topology(phba, pmb, mp);
4694         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4695         pmb->vport = phba->pport;
4696
4697         if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
4698                 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
4699
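                /*
                 * Latch any MDS diagnostic link state in link_flag; later
                 * link handling uses these flags to tell diagnostic states
                 * apart from an ordinary link down.
                 */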
4700                 switch (phba->sli4_hba.link_state.status) {
4701                 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
4702                         phba->link_flag |= LS_MDS_LINK_DOWN;
4703                         break;
4704                 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
4705                         phba->link_flag |= LS_MDS_LOOPBACK;
4706                         break;
4707                 default:
4708                         break;
4709                 }
4710
4711                 /* Initialize completion status */
4712                 mb = &pmb->u.mb;
4713                 mb->mbxStatus = MBX_SUCCESS;
4714
4715                 /* Parse port fault information field */
4716                 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
4717
4718                 /* Parse and translate link attention fields */
4719                 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
4720                 la->eventTag = acqe_fc->event_tag;
4721
4722                 if (phba->sli4_hba.link_state.status ==
4723                     LPFC_FC_LA_TYPE_UNEXP_WWPN) {
4724                         bf_set(lpfc_mbx_read_top_att_type, la,
4725                                LPFC_FC_LA_TYPE_UNEXP_WWPN);
4726                 } else {
4727                         bf_set(lpfc_mbx_read_top_att_type, la,
4728                                LPFC_FC_LA_TYPE_LINK_DOWN);
4729                 }
4730                 /* Invoke the mailbox command callback function */
4731                 lpfc_mbx_cmpl_read_topology(phba, pmb);
4732
4733                 return;
4734         }
4735
4736         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4737         if (rc == MBX_NOT_FINISHED)
4738                 goto out_free_dmabuf;
4739         return;
4740
4741 out_free_dmabuf:
4742         kfree(mp);
4743 out_free_pmb:
4744         mempool_free(pmb, phba->mbox_mem_pool);
4745 }
4746
4747 /**
4748  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
4749  * @phba: pointer to lpfc hba data structure.
4750  * @acqe_sli: pointer to the async SLI completion queue entry.
4751  *
4752  * This routine is to handle the SLI4 asynchronous SLI events.
4753  **/
4754 static void
4755 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
4756 {
4757         char port_name;
4758         char message[128];
4759         uint8_t status;
4760         uint8_t evt_type;
4761         uint8_t operational = 0;
4762         struct temp_event temp_event_data;
4763         struct lpfc_acqe_misconfigured_event *misconfigured;
4764         struct Scsi_Host  *shost;
4765
4766         evt_type = bf_get(lpfc_trailer_type, acqe_sli);
4767
4768         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4769                         "2901 Async SLI event - Event Data1:x%08x Event Data2:"
4770                         "x%08x SLI Event Type:%d\n",
4771                         acqe_sli->event_data1, acqe_sli->event_data2,
4772                         evt_type);
4773
4774         port_name = phba->Port[0];
4775         if (port_name == 0x00)
4776                 port_name = '?'; /* port name is not set */
4777
4778         switch (evt_type) {
4779         case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
4780                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4781                 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
4782                 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4783
4784                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4785                                 "3190 Over Temperature:%d Celsius - Port Name %c\n",
4786                                 acqe_sli->event_data1, port_name);
4787
4788                 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
4789                 shost = lpfc_shost_from_vport(phba->pport);
4790                 fc_host_post_vendor_event(shost, fc_get_event_number(),
4791                                           sizeof(temp_event_data),
4792                                           (char *)&temp_event_data,
4793                                           SCSI_NL_VID_TYPE_PCI
4794                                           | PCI_VENDOR_ID_EMULEX);
4795                 break;
4796         case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
4797                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4798                 temp_event_data.event_code = LPFC_NORMAL_TEMP;
4799                 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4800
4801                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4802                                 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
4803                                 acqe_sli->event_data1, port_name);
4804
4805                 shost = lpfc_shost_from_vport(phba->pport);
4806                 fc_host_post_vendor_event(shost, fc_get_event_number(),
4807                                           sizeof(temp_event_data),
4808                                           (char *)&temp_event_data,
4809                                           SCSI_NL_VID_TYPE_PCI
4810                                           | PCI_VENDOR_ID_EMULEX);
4811                 break;
4812         case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
4813                 misconfigured = (struct lpfc_acqe_misconfigured_event *)
4814                                         &acqe_sli->event_data1;
4815
4816                 /* fetch the status for this port */
4817                 switch (phba->sli4_hba.lnk_info.lnk_no) {
4818                 case LPFC_LINK_NUMBER_0:
4819                         status = bf_get(lpfc_sli_misconfigured_port0_state,
4820                                         &misconfigured->theEvent);
4821                         operational = bf_get(lpfc_sli_misconfigured_port0_op,
4822                                         &misconfigured->theEvent);
4823                         break;
4824                 case LPFC_LINK_NUMBER_1:
4825                         status = bf_get(lpfc_sli_misconfigured_port1_state,
4826                                         &misconfigured->theEvent);
4827                         operational = bf_get(lpfc_sli_misconfigured_port1_op,
4828                                         &misconfigured->theEvent);
4829                         break;
4830                 case LPFC_LINK_NUMBER_2:
4831                         status = bf_get(lpfc_sli_misconfigured_port2_state,
4832                                         &misconfigured->theEvent);
4833                         operational = bf_get(lpfc_sli_misconfigured_port2_op,
4834                                         &misconfigured->theEvent);
4835                         break;
4836                 case LPFC_LINK_NUMBER_3:
4837                         status = bf_get(lpfc_sli_misconfigured_port3_state,
4838                                         &misconfigured->theEvent);
4839                         operational = bf_get(lpfc_sli_misconfigured_port3_op,
4840                                         &misconfigured->theEvent);
4841                         break;
4842                 default:
4843                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4844                                         "3296 "
4845                                         "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
4846                                         "event: Invalid link %d\n",
4847                                         phba->sli4_hba.lnk_info.lnk_no);
4848                         return;
4849                 }
4850
4851                 /* Skip if optic state unchanged */
4852                 if (phba->sli4_hba.lnk_info.optic_state == status)
4853                         return;
4854
4855                 switch (status) {
4856                 case LPFC_SLI_EVENT_STATUS_VALID:
4857                         sprintf(message, "Physical Link is functional");
4858                         break;
4859                 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
4860                         sprintf(message, "Optics faulted/incorrectly "
4861                                 "installed/not installed - Reseat optics, "
4862                                 "if issue not resolved, replace.");
4863                         break;
4864                 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
4865                         sprintf(message,
4866                                 "Optics of two types installed - Remove one "
4867                                 "optic or install matching pair of optics.");
4868                         break;
4869                 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
4870                         sprintf(message, "Incompatible optics - Replace with "
4871                                 "compatible optics for card to function.");
4872                         break;
4873                 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
4874                         sprintf(message, "Unqualified optics - Replace with "
4875                                 "Avago optics for Warranty and Technical "
4876                                 "Support - Link is%s operational",
4877                                 (operational) ? " not" : "");
4878                         break;
4879                 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
4880                         sprintf(message, "Uncertified optics - Replace with "
4881                                 "Avago-certified optics to enable link "
4882                                 "operation - Link is%s operational",
4883                                 (operational) ? " not" : "");
4884                         break;
4885                 default:
4886                         /* firmware is reporting a status we don't know about */
4887                         sprintf(message, "Unknown event status x%02x", status);
4888                         break;
4889                 }
4890                 phba->sli4_hba.lnk_info.optic_state = status;
4891                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4892                                 "3176 Port Name %c %s\n", port_name, message);
4893                 break;
4894         case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
4895                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4896                                 "3192 Remote DPort Test Initiated - "
4897                                 "Event Data1:x%08x Event Data2: x%08x\n",
4898                                 acqe_sli->event_data1, acqe_sli->event_data2);
4899                 break;
4900         default:
4901                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4902                                 "3193 Async SLI event - Event Data1:x%08x Event Data2:"
4903                                 "x%08x SLI Event Type:%d\n",
4904                                 acqe_sli->event_data1, acqe_sli->event_data2,
4905                                 evt_type);
4906                 break;
4907         }
4908 }
4909
4910 /**
4911  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
4912  * @vport: pointer to vport data structure.
4913  *
4914  * This routine is to perform Clear Virtual Link (CVL) on a vport in
4915  * response to a CVL event.
4916  *
4917  * Return the pointer to the ndlp with the vport if successful, otherwise
4918  * return NULL.
4919  **/
4920 static struct lpfc_nodelist *
4921 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4922 {
4923         struct lpfc_nodelist *ndlp;
4924         struct Scsi_Host *shost;
4925         struct lpfc_hba *phba;
4926
4927         if (!vport)
4928                 return NULL;
4929         phba = vport->phba;
4930         if (!phba)
4931                 return NULL;
4932         ndlp = lpfc_findnode_did(vport, Fabric_DID);
4933         if (!ndlp) {
4934                 /* Cannot find existing Fabric ndlp, so allocate a new one */
4935                 ndlp = lpfc_nlp_init(vport, Fabric_DID);
4936                 if (!ndlp)
4937                         return NULL;
4938                 /* Set the node type */
4939                 ndlp->nlp_type |= NLP_FABRIC;
4940                 /* Put ndlp onto node list */
4941                 lpfc_enqueue_node(vport, ndlp);
4942         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4943                 /* re-setup ndlp without removing from node list */
4944                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4945                 if (!ndlp)
4946                         return NULL;
4947         }
4948         if ((phba->pport->port_state < LPFC_FLOGI) &&
4949                 (phba->pport->port_state != LPFC_VPORT_FAILED))
4950                 return NULL;
4951         /* If virtual link is not yet instantiated ignore CVL */
4952         if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4953                 && (vport->port_state != LPFC_VPORT_FAILED))
4954                 return NULL;
4955         shost = lpfc_shost_from_vport(vport);
4956         if (!shost)
4957                 return NULL;
4958         lpfc_linkdown_port(vport);
4959         lpfc_cleanup_pending_mbox(vport);
4960         spin_lock_irq(shost->host_lock);
4961         vport->fc_flag |= FC_VPORT_CVL_RCVD;
4962         spin_unlock_irq(shost->host_lock);
4963
4964         return ndlp;
4965 }
4966
4967 /**
4968  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4969  * @phba: pointer to lpfc hba data structure.
4970  *
4971  * This routine is to perform Clear Virtual Link (CVL) on all vports in
4972  * response to a FCF dead event.
4973  **/
4974 static void
4975 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4976 {
4977         struct lpfc_vport **vports;
4978         int i;
4979
4980         vports = lpfc_create_vport_work_array(phba);
4981         if (vports)
4982                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4983                         lpfc_sli4_perform_vport_cvl(vports[i]);
4984         lpfc_destroy_vport_work_array(phba, vports);
4985 }
4986
4987 /**
4988  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4989  * @phba: pointer to lpfc hba data structure.
4990  * @acqe_fip: pointer to the async fcoe completion queue entry.
4991  *
4992  * This routine is to handle the SLI4 asynchronous fcoe event.
4993  **/
4994 static void
4995 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4996                         struct lpfc_acqe_fip *acqe_fip)
4997 {
4998         uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
4999         int rc;
5000         struct lpfc_vport *vport;
5001         struct lpfc_nodelist *ndlp;
5002         struct Scsi_Host  *shost;
5003         int active_vlink_present;
5004         struct lpfc_vport **vports;
5005         int i;
5006
5007         phba->fc_eventTag = acqe_fip->event_tag;
5008         phba->fcoe_eventtag = acqe_fip->event_tag;
5009         switch (event_type) {
5010         case LPFC_FIP_EVENT_TYPE_NEW_FCF:
5011         case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
5012                 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
5013                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5014                                         LOG_DISCOVERY,
5015                                         "2546 New FCF event, evt_tag:x%x, "
5016                                         "index:x%x\n",
5017                                         acqe_fip->event_tag,
5018                                         acqe_fip->index);
5019                 else
5020                         lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
5021                                         LOG_DISCOVERY,
5022                                         "2788 FCF param modified event, "
5023                                         "evt_tag:x%x, index:x%x\n",
5024                                         acqe_fip->event_tag,
5025                                         acqe_fip->index);
5026                 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5027                         /*
5028                          * During period of FCF discovery, read the FCF
5029                          * table record indexed by the event to update
5030                          * FCF roundrobin failover eligible FCF bmask.
5031                          */
5032                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5033                                         LOG_DISCOVERY,
5034                                         "2779 Read FCF (x%x) for updating "
5035                                         "roundrobin FCF failover bmask\n",
5036                                         acqe_fip->index);
5037                         rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
5038                 }
5039
5040                 /* If the FCF discovery is in progress, do nothing. */
5041                 spin_lock_irq(&phba->hbalock);
5042                 if (phba->hba_flag & FCF_TS_INPROG) {
5043                         spin_unlock_irq(&phba->hbalock);
5044                         break;
5045                 }
5046                 /* If fast FCF failover rescan event is pending, do nothing */
5047                 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
5048                         spin_unlock_irq(&phba->hbalock);
5049                         break;
5050                 }
5051
5052                 /* If the FCF has been in discovered state, do nothing. */
5053                 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
5054                         spin_unlock_irq(&phba->hbalock);
5055                         break;
5056                 }
5057                 spin_unlock_irq(&phba->hbalock);
5058
5059                 /* Otherwise, scan the entire FCF table and re-discover SAN */
5060                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5061                                 "2770 Start FCF table scan per async FCF "
5062                                 "event, evt_tag:x%x, index:x%x\n",
5063                                 acqe_fip->event_tag, acqe_fip->index);
5064                 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
5065                                                      LPFC_FCOE_FCF_GET_FIRST);
5066                 if (rc)
5067                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5068                                         "2547 Issue FCF scan read FCF mailbox "
5069                                         "command failed (x%x)\n", rc);
5070                 break;
5071
5072         case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
5073                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5074                         "2548 FCF Table full count 0x%x tag 0x%x\n",
5075                         bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5076                         acqe_fip->event_tag);
5077                 break;
5078
5079         case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
5080                 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5081                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5082                         "2549 FCF (x%x) disconnected from network, "
5083                         "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
5084                 /*
5085                  * If we are in the middle of FCF failover process, clear
5086                  * the corresponding FCF bit in the roundrobin bitmap.
5087                  */
5088                 spin_lock_irq(&phba->hbalock);
5089                 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
5090                     (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
5091                         spin_unlock_irq(&phba->hbalock);
5092                         /* Update FLOGI FCF failover eligible FCF bmask */
5093                         lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
5094                         break;
5095                 }
5096                 spin_unlock_irq(&phba->hbalock);
5097
5098                 /* If the event is not for the currently used FCF, do nothing */
5099                 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
5100                         break;
5101
5102                 /*
5103                  * Otherwise, request the port to rediscover the entire FCF
5104                  * table for a fast recovery in case the current FCF
5105                  * is no longer valid, as we are not already in the
5106                  * middle of the FCF failover process.
5107                  */
5108                 spin_lock_irq(&phba->hbalock);
5109                 /* Mark the fast failover process in progress */
5110                 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
5111                 spin_unlock_irq(&phba->hbalock);
5112
5113                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5114                                 "2771 Start FCF fast failover process due to "
5115                                 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
5116                                 "\n", acqe_fip->event_tag, acqe_fip->index);
5117                 rc = lpfc_sli4_redisc_fcf_table(phba);
5118                 if (rc) {
5119                         lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5120                                         LOG_DISCOVERY,
5121                                         "2772 Issue FCF rediscover mailbox "
5122                                         "command failed, fail through to FCF "
5123                                         "dead event\n");
5124                         spin_lock_irq(&phba->hbalock);
5125                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
5126                         spin_unlock_irq(&phba->hbalock);
5127                         /*
5128                          * Last resort will fail over by treating this
5129                          * as a link down to FCF registration.
5130                          */
5131                         lpfc_sli4_fcf_dead_failthrough(phba);
5132                 } else {
5133                         /* Reset FCF roundrobin bmask for new discovery */
5134                         lpfc_sli4_clear_fcf_rr_bmask(phba);
5135                         /*
5136                          * Handling fast FCF failover to a DEAD FCF event is
5137                          * considered equivalent to receiving CVL to all vports.
5138                          */
5139                         lpfc_sli4_perform_all_vport_cvl(phba);
5140                 }
5141                 break;
5142         case LPFC_FIP_EVENT_TYPE_CVL:
5143                 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5144                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5145                         "2718 Clear Virtual Link Received for VPI 0x%x"
5146                         " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
5147
5148                 vport = lpfc_find_vport_by_vpid(phba,
5149                                                 acqe_fip->index);
5150                 ndlp = lpfc_sli4_perform_vport_cvl(vport);
5151                 if (!ndlp)
5152                         break;
5153                 active_vlink_present = 0;
5154
5155                 vports = lpfc_create_vport_work_array(phba);
5156                 if (vports) {
5157                         for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5158                                         i++) {
5159                                 if ((!(vports[i]->fc_flag &
5160                                         FC_VPORT_CVL_RCVD)) &&
5161                                         (vports[i]->port_state > LPFC_FDISC)) {
5162                                         active_vlink_present = 1;
5163                                         break;
5164                                 }
5165                         }
5166                         lpfc_destroy_vport_work_array(phba, vports);
5167                 }
5168
5169                 /*
5170                  * Don't re-instantiate if vport is marked for deletion.
5171                  * If we are here first then vport_delete is going to wait
5172                  * for discovery to complete.
5173                  */
5174                 if (!(vport->load_flag & FC_UNLOADING) &&
5175                                         active_vlink_present) {
5176                         /*
5177                          * If there are other active VLinks present,
5178                          * re-instantiate the Vlink using FDISC.
5179                          */
5180                         mod_timer(&ndlp->nlp_delayfunc,
5181                                   jiffies + msecs_to_jiffies(1000));
5182                         shost = lpfc_shost_from_vport(vport);
5183                         spin_lock_irq(shost->host_lock);
5184                         ndlp->nlp_flag |= NLP_DELAY_TMO;
5185                         spin_unlock_irq(shost->host_lock);
5186                         ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
5187                         vport->port_state = LPFC_FDISC;
5188                 } else {
5189                         /*
5190                          * Otherwise, request the port to rediscover
5191                          * the entire FCF table for a fast recovery,
5192                          * in case the current FCF is no longer valid,
5193                          * if we are not already in the FCF failover
5194                          * process.
5195                          */
5196                         spin_lock_irq(&phba->hbalock);
5197                         if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
5198                                 spin_unlock_irq(&phba->hbalock);
5199                                 break;
5200                         }
5201                         /* Mark the fast failover process in progress */
5202                         phba->fcf.fcf_flag |= FCF_ACVL_DISC;
5203                         spin_unlock_irq(&phba->hbalock);
5204                         lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
5205                                         LOG_DISCOVERY,
5206                                         "2773 Start FCF failover per CVL, "
5207                                         "evt_tag:x%x\n", acqe_fip->event_tag);
5208                         rc = lpfc_sli4_redisc_fcf_table(phba);
5209                         if (rc) {
5210                                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5211                                                 LOG_DISCOVERY,
5212                                                 "2774 Issue FCF rediscover "
5213                                                 "mailbox command failed, "
5214                                                 "fail through to CVL event\n");
5215                                 spin_lock_irq(&phba->hbalock);
5216                                 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
5217                                 spin_unlock_irq(&phba->hbalock);
5218                                 /*
5219                                  * Last resort will be a retry on the
5220                                  * currently registered FCF entry.
5221                                  */
5222                                 lpfc_retry_pport_discovery(phba);
5223                         } else
5224                                 /*
5225                                  * Reset FCF roundrobin bmask for new
5226                                  * discovery.
5227                                  */
5228                                 lpfc_sli4_clear_fcf_rr_bmask(phba);
5229                 }
5230                 break;
5231         default:
5232                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5233                         "0288 Unknown FCoE event type 0x%x event tag "
5234                         "0x%x\n", event_type, acqe_fip->event_tag);
5235                 break;
5236         }
5237 }
5238
5239 /**
5240  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
5241  * @phba: pointer to lpfc hba data structure.
5242  * @acqe_dcbx: pointer to the async DCBX completion queue entry.
5243  *
5244  * This routine is to handle the SLI4 asynchronous dcbx event.
5245  **/
5246 static void
5247 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
5248                          struct lpfc_acqe_dcbx *acqe_dcbx)
5249 {
5250         phba->fc_eventTag = acqe_dcbx->event_tag;
5251         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5252                         "0290 The SLI4 DCBX asynchronous event is not "
5253                         "handled yet\n");
5254 }
5255
5256 /**
5257  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
5258  * @phba: pointer to lpfc hba data structure.
5259  * @acqe_grp5: pointer to the async grp5 completion queue entry.
5260  *
5261  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
5262  * is an asynchronous notification of a logical link speed change.  The Port
5263  * reports the logical link speed in units of 10Mbps.
5264  **/
5265 static void
5266 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
5267                          struct lpfc_acqe_grp5 *acqe_grp5)
5268 {
5269         uint16_t prev_ll_spd;
5270
5271         phba->fc_eventTag = acqe_grp5->event_tag;
5272         phba->fcoe_eventtag = acqe_grp5->event_tag;
5273         prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
5274         phba->sli4_hba.link_state.logical_speed =
5275                 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
5276         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5277                         "2789 GRP5 Async Event: Updating logical link speed "
5278                         "from %dMbps to %dMbps\n", prev_ll_spd,
5279                         phba->sli4_hba.link_state.logical_speed);
5280 }
5281
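
/*
 * Illustrative sketch only (not part of the driver): the grp5 logical
 * link speed conversion shown above, as a standalone helper. Since the
 * Port reports the speed in 10Mbps units, a raw field value of 1000
 * maps to 10000 Mbps (10 Gbps). The helper name is hypothetical.
 */
static inline uint32_t example_grp5_raw_spd_to_mbps(uint16_t raw_spd)
{
	/* multiply the 10Mbps-unit field out to plain Mbps */
	return (uint32_t)raw_spd * 10;
}
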
5282 /**
5283  * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
5284  * @phba: pointer to lpfc hba data structure.
5285  *
5286  * This routine is invoked by the worker thread to process all the pending
5287  * SLI4 asynchronous events.
5288  **/
5289 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
5290 {
5291         struct lpfc_cq_event *cq_event;
5292
5293         /* First, declare the async event has been handled */
5294         spin_lock_irq(&phba->hbalock);
5295         phba->hba_flag &= ~ASYNC_EVENT;
5296         spin_unlock_irq(&phba->hbalock);
5297         /* Now, handle all the async events */
5298         while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5299                 /* Get the first event from the head of the event queue */
5300                 spin_lock_irq(&phba->hbalock);
5301                 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
5302                                  cq_event, struct lpfc_cq_event, list);
5303                 spin_unlock_irq(&phba->hbalock);
5304                 /* Process the asynchronous event */
5305                 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
5306                 case LPFC_TRAILER_CODE_LINK:
5307                         lpfc_sli4_async_link_evt(phba,
5308                                                  &cq_event->cqe.acqe_link);
5309                         break;
5310                 case LPFC_TRAILER_CODE_FCOE:
5311                         lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
5312                         break;
5313                 case LPFC_TRAILER_CODE_DCBX:
5314                         lpfc_sli4_async_dcbx_evt(phba,
5315                                                  &cq_event->cqe.acqe_dcbx);
5316                         break;
5317                 case LPFC_TRAILER_CODE_GRP5:
5318                         lpfc_sli4_async_grp5_evt(phba,
5319                                                  &cq_event->cqe.acqe_grp5);
5320                         break;
5321                 case LPFC_TRAILER_CODE_FC:
5322                         lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
5323                         break;
5324                 case LPFC_TRAILER_CODE_SLI:
5325                         lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
5326                         break;
5327                 default:
5328                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5329                                         "1804 Invalid asynchronous event code: "
5330                                         "x%x\n", bf_get(lpfc_trailer_code,
5331                                         &cq_event->cqe.mcqe_cmpl));
5332                         break;
5333                 }
5334                 /* Free the processed completion event back to the free pool */
5335                 lpfc_sli4_cq_event_release(phba, cq_event);
5336         }
5337 }
5338
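
/*
 * Illustrative sketch only (not part of the driver): the drain pattern
 * used by lpfc_sli4_async_event_proc() above. The hbalock is held just
 * long enough to detach one event from the queue; the potentially slow
 * dispatch then runs unlocked. The function name is hypothetical.
 */
static void example_drain_async_queue(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* ... dispatch on the trailer code without the lock ... */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
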
5339 /**
5340  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
5341  * @phba: pointer to lpfc hba data structure.
5342  *
5343  * This routine is invoked by the worker thread to process the pending
5344  * FCF table rediscovery completion event.
5345  **/
5346 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
5347 {
5348         int rc;
5349
5350         spin_lock_irq(&phba->hbalock);
5351         /* Clear FCF rediscovery timeout event */
5352         phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
5353         /* Clear driver fast failover FCF record flag */
5354         phba->fcf.failover_rec.flag = 0;
5355         /* Set state for FCF fast failover */
5356         phba->fcf.fcf_flag |= FCF_REDISC_FOV;
5357         spin_unlock_irq(&phba->hbalock);
5358
5359         /* Scan FCF table from the first entry to re-discover SAN */
5360         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
5361                         "2777 Start post-quiescent FCF table scan\n");
5362         rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5363         if (rc)
5364                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5365                                 "2747 Issue FCF scan read FCF mailbox "
5366                                 "command failed 0x%x\n", rc);
5367 }
5368
5369 /**
5370  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
5371  * @phba: pointer to lpfc hba data structure.
5372  * @dev_grp: The HBA PCI-Device group number.
5373  *
5374  * This routine is invoked to set up the per HBA PCI-Device group function
5375  * API jump table entries.
5376  *
5377  * Return: 0 if success, otherwise -ENODEV
5378  **/
5379 int
5380 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5381 {
5382         int rc;
5383
5384         /* Set up lpfc PCI-device group */
5385         phba->pci_dev_grp = dev_grp;
5386
5387         /* The LPFC_PCI_DEV_OC uses SLI4 */
5388         if (dev_grp == LPFC_PCI_DEV_OC)
5389                 phba->sli_rev = LPFC_SLI_REV4;
5390
5391         /* Set up device INIT API function jump table */
5392         rc = lpfc_init_api_table_setup(phba, dev_grp);
5393         if (rc)
5394                 return -ENODEV;
5395         /* Set up SCSI API function jump table */
5396         rc = lpfc_scsi_api_table_setup(phba, dev_grp);
5397         if (rc)
5398                 return -ENODEV;
5399         /* Set up SLI API function jump table */
5400         rc = lpfc_sli_api_table_setup(phba, dev_grp);
5401         if (rc)
5402                 return -ENODEV;
5403         /* Set up MBOX API function jump table */
5404         rc = lpfc_mbox_api_table_setup(phba, dev_grp);
5405         if (rc)
5406                 return -ENODEV;
5407
5408         return 0;
5409 }
5410
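
/*
 * Illustrative usage sketch only: selecting the jump tables during probe.
 * LPFC_PCI_DEV_LP (SLI-3) and LPFC_PCI_DEV_OC (SLI-4) are the two device
 * groups handled above; the wrapper itself is hypothetical.
 */
static int example_select_api_tables(struct lpfc_hba *phba, bool is_sli4)
{
	return lpfc_api_table_setup(phba, is_sli4 ? LPFC_PCI_DEV_OC
						  : LPFC_PCI_DEV_LP);
}
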
5411 /**
5412  * lpfc_log_intr_mode - Log the active interrupt mode
5413  * @phba: pointer to lpfc hba data structure.
5414  * @intr_mode: active interrupt mode adopted.
5415  *
5416  * This routine is invoked to log the currently used active interrupt mode
5417  * of the device.
5418  **/
5419 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
5420 {
5421         switch (intr_mode) {
5422         case 0:
5423                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5424                                 "0470 Enabled INTx interrupt mode.\n");
5425                 break;
5426         case 1:
5427                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5428                                 "0481 Enabled MSI interrupt mode.\n");
5429                 break;
5430         case 2:
5431                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5432                                 "0480 Enabled MSI-X interrupt mode.\n");
5433                 break;
5434         default:
5435                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5436                                 "0482 Illegal interrupt mode.\n");
5437                 break;
5438         }
5439         return;
5440 }
5441
5442 /**
5443  * lpfc_enable_pci_dev - Enable a generic PCI device.
5444  * @phba: pointer to lpfc hba data structure.
5445  *
5446  * This routine is invoked to enable the PCI device that is common to all
5447  * PCI devices.
5448  *
5449  * Return codes
5450  *      0 - successful
5451  *      other values - error
5452  **/
5453 static int
5454 lpfc_enable_pci_dev(struct lpfc_hba *phba)
5455 {
5456         struct pci_dev *pdev;
5457
5458         /* Obtain PCI device reference */
5459         if (!phba->pcidev)
5460                 goto out_error;
5461         else
5462                 pdev = phba->pcidev;
5463         /* Enable PCI device */
5464         if (pci_enable_device_mem(pdev))
5465                 goto out_error;
5466         /* Request PCI resource for the device */
5467         if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
5468                 goto out_disable_device;
5469         /* Set up device as PCI master and save state for EEH */
5470         pci_set_master(pdev);
5471         pci_try_set_mwi(pdev);
5472         pci_save_state(pdev);
5473
5474         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5475         if (pci_is_pcie(pdev))
5476                 pdev->needs_freset = 1;
5477
5478         return 0;
5479
5480 out_disable_device:
5481         pci_disable_device(pdev);
5482 out_error:
5483         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5484                         "1401 Failed to enable pci device\n");
5485         return -ENODEV;
5486 }
5487
5488 /**
5489  * lpfc_disable_pci_dev - Disable a generic PCI device.
5490  * @phba: pointer to lpfc hba data structure.
5491  *
5492  * This routine is invoked to disable the PCI device that is common to all
5493  * PCI devices.
5494  **/
5495 static void
5496 lpfc_disable_pci_dev(struct lpfc_hba *phba)
5497 {
5498         struct pci_dev *pdev;
5499
5500         /* Obtain PCI device reference */
5501         if (!phba->pcidev)
5502                 return;
5503         else
5504                 pdev = phba->pcidev;
5505         /* Release PCI resource and disable PCI device */
5506         pci_release_mem_regions(pdev);
5507         pci_disable_device(pdev);
5508
5509         return;
5510 }
5511
5512 /**
5513  * lpfc_reset_hba - Reset a hba
5514  * @phba: pointer to lpfc hba data structure.
5515  *
5516  * This routine is invoked to reset a hba device. It brings the HBA
5517  * offline, performs a board restart, and then brings the board back
5518  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
5519  * outstanding mailbox commands.
5520  **/
5521 void
5522 lpfc_reset_hba(struct lpfc_hba *phba)
5523 {
5524         /* If resets are disabled then set error state and return. */
5525         if (!phba->cfg_enable_hba_reset) {
5526                 phba->link_state = LPFC_HBA_ERROR;
5527                 return;
5528         }
5529         if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
5530                 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5531         else
5532                 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
5533         lpfc_offline(phba);
5534         lpfc_sli_brdrestart(phba);
5535         lpfc_online(phba);
5536         lpfc_unblock_mgmt_io(phba);
5537 }
5538
5539 /**
5540  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
5541  * @phba: pointer to lpfc hba data structure.
5542  *
5543  * This function reads the PCI SR-IOV extended capability of the physical
5544  * function to obtain the total number of virtual functions the device
5545  * supports. As not all devices support SR-IOV, a return value of 0 simply
5546  * means the device has no SR-IOV capability and is not treated as an
5547  * error condition.
5548  **/
5549 uint16_t
5550 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
5551 {
5552         struct pci_dev *pdev = phba->pcidev;
5553         uint16_t nr_virtfn;
5554         int pos;
5555
5556         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
5557         if (pos == 0)
5558                 return 0;
5559
5560         pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
5561         return nr_virtfn;
5562 }
5563
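
/*
 * Illustrative sketch only: clamping a requested VF count to what the
 * SR-IOV extended capability advertises before calling pci_enable_sriov(),
 * mirroring the check done in lpfc_sli_probe_sriov_nr_virtfn() below.
 * The helper name is hypothetical.
 */
static int example_enable_clamped_vfs(struct lpfc_hba *phba, int requested)
{
	uint16_t max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);

	if (requested > max_nr_vfn)
		requested = max_nr_vfn;
	return requested ? pci_enable_sriov(phba->pcidev, requested) : 0;
}
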
5564 /**
5565  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
5566  * @phba: pointer to lpfc hba data structure.
5567  * @nr_vfn: number of virtual functions to be enabled.
5568  *
5569  * This function enables PCI SR-IOV virtual functions on a physical
5570  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
5571  * enable that number of virtual functions on the physical function. As
5572  * not all devices support SR-IOV, the return code from the pci_enable_sriov()
5573  * API call is not considered an error condition for most devices.
5574  **/
5575 int
5576 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
5577 {
5578         struct pci_dev *pdev = phba->pcidev;
5579         uint16_t max_nr_vfn;
5580         int rc;
5581
5582         max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
5583         if (nr_vfn > max_nr_vfn) {
5584                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5585                                 "3057 Requested vfs (%d) greater than "
5586                                 "supported vfs (%d)\n", nr_vfn, max_nr_vfn);
5587                 return -EINVAL;
5588         }
5589
5590         rc = pci_enable_sriov(pdev, nr_vfn);
5591         if (rc) {
5592                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5593                                 "2806 Failed to enable sriov on this device "
5594                                 "with vfn number nr_vf:%d, rc:%d\n",
5595                                 nr_vfn, rc);
5596         } else
5597                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5598                                 "2807 Successfully enabled sriov on this device "
5599                                 "with vfn number nr_vf:%d\n", nr_vfn);
5600         return rc;
5601 }
5602
5603 /**
5604  * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
5605  * @phba: pointer to lpfc hba data structure.
5606  *
5607  * This routine is invoked to set up the driver internal resources before the
5608  * device-specific resource setup to support the HBA device it is attached to.
5609  *
5610  * Return codes
5611  *      0 - successful
5612  *      other values - error
5613  **/
5614 static int
5615 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5616 {
5617         struct lpfc_sli *psli = &phba->sli;
5618
5619         /*
5620          * Driver resources common to all SLI revisions
5621          */
5622         atomic_set(&phba->fast_event_count, 0);
5623         spin_lock_init(&phba->hbalock);
5624
5625         /* Initialize ndlp management spinlock */
5626         spin_lock_init(&phba->ndlp_lock);
5627
5628         /* Initialize port_list spinlock */
5629         spin_lock_init(&phba->port_list_lock);
5630         INIT_LIST_HEAD(&phba->port_list);
5631
5632         INIT_LIST_HEAD(&phba->work_list);
5633         init_waitqueue_head(&phba->wait_4_mlo_m_q);
5634
5635         /* Initialize the wait queue head for the kernel thread */
5636         init_waitqueue_head(&phba->work_waitq);
5637
5638         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5639                         "1403 Protocols supported %s %s %s\n",
5640                         ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
5641                                 "SCSI" : " "),
5642                         ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
5643                                 "NVME" : " "),
5644                         (phba->nvmet_support ? "NVMET" : " "));
5645
5646         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
5647                 /* Initialize the scsi buffer list used by driver for scsi IO */
5648                 spin_lock_init(&phba->scsi_buf_list_get_lock);
5649                 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5650                 spin_lock_init(&phba->scsi_buf_list_put_lock);
5651                 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5652         }
5653
5654         if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
5655                 (phba->nvmet_support == 0)) {
5656                 /* Initialize the NVME buffer list used by driver for NVME IO */
5657                 spin_lock_init(&phba->nvme_buf_list_get_lock);
5658                 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
5659                 phba->get_nvme_bufs = 0;
5660                 spin_lock_init(&phba->nvme_buf_list_put_lock);
5661                 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
5662                 phba->put_nvme_bufs = 0;
5663         }
5664
5665         /* Initialize the fabric iocb list */
5666         INIT_LIST_HEAD(&phba->fabric_iocb_list);
5667
5668         /* Initialize list to save ELS buffers */
5669         INIT_LIST_HEAD(&phba->elsbuf);
5670
5671         /* Initialize FCF connection rec list */
5672         INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5673
5674         /* Initialize OAS configuration list */
5675         spin_lock_init(&phba->devicelock);
5676         INIT_LIST_HEAD(&phba->luns);
5677
5678         /* MBOX heartbeat timer */
5679         timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
5680         /* Fabric block timer */
5681         timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
5682         /* EA polling mode timer */
5683         timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
5684         /* Heartbeat timer */
5685         timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
5686
5687         return 0;
5688 }
5689
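/*
 * Illustrative sketch only: arming one of the timers initialized above.
 * timer_setup() only registers the callback; a timer does not fire until
 * it is armed with mod_timer(). The helper name and the one-second
 * interval are hypothetical.
 */
static void example_arm_heartbeat(struct lpfc_hba *phba)
{
	/* schedule the heartbeat callback one second from now */
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(1000));
}
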
5690 /**
5691  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
5692  * @phba: pointer to lpfc hba data structure.
5693  *
5694  * This routine is invoked to set up the driver internal resources specific to
5695  * support the SLI-3 HBA device it is attached to.
5696  *
5697  * Return codes
5698  * 0 - successful
5699  * other values - error
5700  **/
5701 static int
5702 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5703 {
5704         int rc;
5705
5706         /*
5707          * Initialize timers used by driver
5708          */
5709
5710         /* FCP polling mode timer */
5711         timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
5712
5713         /* Host attention work mask setup */
5714         phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
5715         phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
5716
5717         /* Get all the module params for configuring this host */
5718         lpfc_get_cfgparam(phba);
5719
5720         /* Set up phase-1 common device driver resources */
5721         rc = lpfc_setup_driver_resource_phase1(phba);
5722         if (rc)
5723                 return -ENODEV;
5724
5725         if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
5726                 phba->menlo_flag |= HBA_MENLO_SUPPORT;
5727                 /* check for menlo minimum sg count */
5728                 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
5729                         phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
5730         }
5731
5732         if (!phba->sli.sli3_ring)
5733                 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
5734                                               sizeof(struct lpfc_sli_ring),
5735                                               GFP_KERNEL);
5736         if (!phba->sli.sli3_ring)
5737                 return -ENOMEM;
5738
5739         /*
5740          * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
5741          * used to create the sg_dma_buf_pool must be dynamically calculated.
5742          */
5743
5744         /* Initialize the host templates with the configured values. */
5745         lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5746         lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5747         lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5748
5749         /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
5750         if (phba->cfg_enable_bg) {
5751                 /*
5752                  * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5753                  * the FCP rsp, and a BDE for each. Since we have no control
5754                  * over how many protection data segments the SCSI Layer
5755                  * will hand us (ie: there could be one for every block
5756                  * in the IO), we just allocate enough BDEs to accommodate
5757                  * our max amount and we need to limit lpfc_sg_seg_cnt to
5758                  * minimize the risk of running out.
5759                  */
5760                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5761                         sizeof(struct fcp_rsp) +
5762                         (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
5763
5764                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
5765                         phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
5766
5767                 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
5768                 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
5769         } else {
5770                 /*
5771                  * The scsi_buf for a regular I/O will hold the FCP cmnd,
5772                  * the FCP rsp, a BDE for each, and a BDE for up to
5773                  * cfg_sg_seg_cnt data segments.
5774                  */
5775                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5776                         sizeof(struct fcp_rsp) +
5777                         ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
5778
5779                 /* Total BDEs in BPL for scsi_sg_list */
5780                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
5781         }
5782
5783         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5784                         "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
5785                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5786                         phba->cfg_total_seg_cnt);
5787
5788         phba->max_vpi = LPFC_MAX_VPI;
5789         /* This will be set to correct value after config_port mbox */
5790         phba->max_vports = 0;
5791
5792         /*
5793          * Initialize the SLI Layer to run with lpfc HBAs.
5794          */
5795         lpfc_sli_setup(phba);
5796         lpfc_sli_queue_init(phba);
5797
5798         /* Allocate device driver memory */
5799         if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
5800                 return -ENOMEM;
5801
5802         /*
5803          * Enable sr-iov virtual functions if supported and configured
5804          * through the module parameter.
5805          */
5806         if (phba->cfg_sriov_nr_virtfn > 0) {
5807                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5808                                                  phba->cfg_sriov_nr_virtfn);
5809                 if (rc) {
5810                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5811                                         "2808 Requested number of SR-IOV "
5812                                         "virtual functions (%d) is not "
5813                                         "supported\n",
5814                                         phba->cfg_sriov_nr_virtfn);
5815                         phba->cfg_sriov_nr_virtfn = 0;
5816                 }
5817         }
5818
5819         return 0;
5820 }
5821
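/*
 * Illustrative sketch only: the non-T10-DIF buffer sizing computed in
 * lpfc_sli_driver_resource_setup() above, as a standalone helper. One
 * FCP cmnd, one FCP rsp, a BDE for each, plus one BDE per data segment;
 * e.g. sg_seg_cnt == 64 yields room for 66 BDEs. The helper name is
 * hypothetical.
 */
static inline int example_sli3_sg_dma_buf_size(int sg_seg_cnt)
{
	return sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
	       ((sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
}
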
5822 /**
5823  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
5824  * @phba: pointer to lpfc hba data structure.
5825  *
5826  * This routine is invoked to unset the driver internal resources set up
5827  * specifically for supporting the SLI-3 HBA device it is attached to.
5828  **/
5829 static void
5830 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
5831 {
5832         /* Free device driver memory allocated */
5833         lpfc_mem_free_all(phba);
5834
5835         return;
5836 }
5837
5838 /**
5839  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
5840  * @phba: pointer to lpfc hba data structure.
5841  *
5842  * This routine is invoked to set up the driver internal resources specific to
5843  * support the SLI-4 HBA device it is attached to.
5844  *
5845  * Return codes
5846  *      0 - successful
5847  *      other values - error
5848  **/
5849 static int
5850 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5851 {
5852         LPFC_MBOXQ_t *mboxq;
5853         MAILBOX_t *mb;
5854         int rc, i, max_buf_size;
5855         uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
5856         struct lpfc_mqe *mqe;
5857         int longs;
5858         int fof_vectors = 0;
5859         int extra;
5860         uint64_t wwn;
5861         u32 if_type;
5862         u32 if_fam;
5863
5864         phba->sli4_hba.num_online_cpu = num_online_cpus();
5865         phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
5866         phba->sli4_hba.curr_disp_cpu = 0;
5867
5868         /* Get all the module params for configuring this host */
5869         lpfc_get_cfgparam(phba);
5870
5871         /* Set up phase-1 common device driver resources */
5872         rc = lpfc_setup_driver_resource_phase1(phba);
5873         if (rc)
5874                 return -ENODEV;
5875
5876         /* Before proceeding, wait for POST completion and device ready */
5877         rc = lpfc_sli4_post_status_check(phba);
5878         if (rc)
5879                 return -ENODEV;
5880
5881         /*
5882          * Initialize timers used by driver
5883          */
5884
5885         timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
5886
5887         /* FCF rediscover timer */
5888         timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
5889
5890         /*
5891          * Control structure for handling external multi-buffer mailbox
5892          * command pass-through.
5893          */
5894         memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
5895                 sizeof(struct lpfc_mbox_ext_buf_ctx));
5896         INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
5897
5898         phba->max_vpi = LPFC_MAX_VPI;
5899
5900         /* This will be set to correct value after the read_config mbox */
5901         phba->max_vports = 0;
5902
5903         /* Program the default value of vlan_id and fc_map */
5904         phba->valid_vlan = 0;
5905         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5906         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5907         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5908
5909         /*
5910          * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
5911          * we will associate a new ring, for each EQ/CQ/WQ tuple.
5912          * The WQ create will allocate the ring.
5913          */
5914
5915         /*
5916          * 1 for cmd, 1 for rsp, NVME adds an extra one
5917          * for boundary conditions in its max_sgl_segment template.
5918          */
5919         extra = 2;
5920         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
5921                 extra++;
5922
5923         /*
5924          * It doesn't matter what family our adapter is in, we are
5925          * limited to 2 Pages, 512 SGEs, for our SGL.
5926          * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5927          */
5928         max_buf_size = (2 * SLI4_PAGE_SIZE);
5929
5930         /*
5931          * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
5932          * used to create the sg_dma_buf_pool must be calculated.
5933          */
5934         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5935                 /*
5936                  * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
5937                  * the FCP rsp, and a SGE. Since we have no control
5938                  * over how many protection segments the SCSI Layer
5939                  * will hand us (ie: there could be one for every block
5940                  * in the IO), just allocate enough SGEs to accommodate
5941                  * our max amount and we need to limit lpfc_sg_seg_cnt
5942                  * to minimize the risk of running out.
5943                  */
5944                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5945                                 sizeof(struct fcp_rsp) + max_buf_size;
5946
5947                 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5948                 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5949
5950                 /*
5951                  * If supporting DIF, reduce the seg count for scsi to
5952                  * allow room for the DIF sges.
5953                  */
5954                 if (phba->cfg_enable_bg &&
5955                     phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
5956                         phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
5957                 else
5958                         phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
5959
5960         } else {
5961                 /*
5962                  * The scsi_buf for a regular I/O holds the FCP cmnd,
5963                  * the FCP rsp, a SGE for each, and a SGE for up to
5964                  * cfg_sg_seg_cnt data segments.
5965                  */
5966                 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5967                                 sizeof(struct fcp_rsp) +
5968                                 ((phba->cfg_sg_seg_cnt + extra) *
5969                                 sizeof(struct sli4_sge));
5970
5971                 /* Total SGEs for scsi_sg_list */
5972                 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
5973                 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
5974
5975                 /*
5976                  * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
5977                  * need to post 1 page for the SGL.
5978                  */
5979         }
5980
5981         /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
5982         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
5983                 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
5984                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
5985                                         "6300 Reducing NVME sg segment "
5986                                         "cnt to %d\n",
5987                                         LPFC_MAX_NVME_SEG_CNT);
5988                         phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
5989                 } else
5990                         phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
5991         }
5992
5993         /* Initialize the host templates with the updated values. */
5994         lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
5995         lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
5996         lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
5997
5998         if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
5999                 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6000         else
6001                 phba->cfg_sg_dma_buf_size =
6002                         SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6003
6004         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6005                         "9087 sg_seg_cnt:%d dmabuf_size:%d "
6006                         "total:%d scsi:%d nvme:%d\n",
6007                         phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6008                         phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
6009                         phba->cfg_nvme_seg_cnt);
6010
6011         /* Initialize buffer queue management fields */
6012         INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6013         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
6014         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
6015
6016         /*
6017          * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6018          */
6019         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6020                 /* Initialize the Abort scsi buffer list used by driver */
6021                 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
6022                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
6023         }
6024
6025         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6026                 /* Initialize the Abort nvme buffer list used by driver */
6027                 spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
6028                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
6029                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6030                 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6031         }
6032
6033         /* This abort list is used by the worker thread */
6034         spin_lock_init(&phba->sli4_hba.sgl_list_lock);
6035         spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6036
6037         /*
6038          * Initialize driver internal slow-path work queues
6039          */
6040
6041         /* Driver internal slow-path CQ Event pool */
6042         INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
6043         /* Response IOCB work queue list */
6044         INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
6045         /* Asynchronous event CQ Event work queue list */
6046         INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6047         /* Fast-path XRI aborted CQ Event work queue list */
6048         INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
6049         /* Slow-path XRI aborted CQ Event work queue list */
6050         INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
6051         /* Receive queue CQ Event work queue list */
6052         INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
6053
6054         /* Initialize extent block lists. */
6055         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
6056         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
6057         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
6058         INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
6059
6060         /* Initialize mboxq lists. If the early init routines fail,
6061          * these lists need to be correctly initialized.
6062          */
6063         INIT_LIST_HEAD(&phba->sli.mboxq);
6064         INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
6065
6066         /* initialize optic_state to 0xFF */
6067         phba->sli4_hba.lnk_info.optic_state = 0xff;
6068
6069         /* Allocate device driver memory */
6070         rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
6071         if (rc)
6072                 return -ENOMEM;
6073
6074         /* IF Type 2 ports get initialized now. */
6075         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
6076             LPFC_SLI_INTF_IF_TYPE_2) {
6077                 rc = lpfc_pci_function_reset(phba);
6078                 if (unlikely(rc)) {
6079                         rc = -ENODEV;
6080                         goto out_free_mem;
6081                 }
6082                 phba->temp_sensor_support = 1;
6083         }
6084
6085         /* Create the bootstrap mailbox command */
6086         rc = lpfc_create_bootstrap_mbox(phba);
6087         if (unlikely(rc))
6088                 goto out_free_mem;
6089
6090         /* Set up the host's endian order with the device. */
6091         rc = lpfc_setup_endian_order(phba);
6092         if (unlikely(rc))
6093                 goto out_free_bsmbx;
6094
6095         /* Set up the hba's configuration parameters. */
6096         rc = lpfc_sli4_read_config(phba);
6097         if (unlikely(rc))
6098                 goto out_free_bsmbx;
6099         rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
6100         if (unlikely(rc))
6101                 goto out_free_bsmbx;
6102
6103         /* IF Type 0 ports get initialized now. */
6104         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6105             LPFC_SLI_INTF_IF_TYPE_0) {
6106                 rc = lpfc_pci_function_reset(phba);
6107                 if (unlikely(rc))
6108                         goto out_free_bsmbx;
6109         }
6110
6111         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6112                                                        GFP_KERNEL);
6113         if (!mboxq) {
6114                 rc = -ENOMEM;
6115                 goto out_free_bsmbx;
6116         }
6117
6118         /* Check for NVMET being configured */
6119         phba->nvmet_support = 0;
6120         if (lpfc_enable_nvmet_cnt) {
6121
6122                 /* First get WWN of HBA instance */
6123                 lpfc_read_nv(phba, mboxq);
6124                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6125                 if (rc != MBX_SUCCESS) {
6126                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6127                                         "6016 Mailbox failed, mbxCmd x%x "
6128                                         "READ_NV, mbxStatus x%x\n",
6129                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6130                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe));
6131                         mempool_free(mboxq, phba->mbox_mem_pool);
6132                         rc = -EIO;
6133                         goto out_free_bsmbx;
6134                 }
6135                 mb = &mboxq->u.mb;
6136                 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
6137                        sizeof(uint64_t));
6138                 wwn = cpu_to_be64(wwn);
6139                 phba->sli4_hba.wwnn.u.name = wwn;
6140                 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
6141                        sizeof(uint64_t));
6142                 /* wwn is WWPN of HBA instance */
6143                 wwn = cpu_to_be64(wwn);
6144                 phba->sli4_hba.wwpn.u.name = wwn;
6145
6146                 /* Check to see if it matches any module parameter */
6147                 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
6148                         if (wwn == lpfc_enable_nvmet[i]) {
6149 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
6150                                 if (lpfc_nvmet_mem_alloc(phba))
6151                                         break;
6152
6153                                 phba->nvmet_support = 1; /* a match */
6154
6155                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6156                                                 "6017 NVME Target %016llx\n",
6157                                                 wwn);
6158 #else
6159                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6160                                                 "6021 Can't enable NVME Target."
6161                                                 " NVME_TARGET_FC infrastructure"
6162                                                 " is not in kernel\n");
6163 #endif
6164                                 break;
6165                         }
6166                 }
6167         }
6168
6169         lpfc_nvme_mod_param_dep(phba);
6170
6171         /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
6172         lpfc_supported_pages(mboxq);
6173         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6174         if (!rc) {
6175                 mqe = &mboxq->u.mqe;
6176                 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
6177                        LPFC_MAX_SUPPORTED_PAGES);
6178                 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
6179                         switch (pn_page[i]) {
6180                         case LPFC_SLI4_PARAMETERS:
6181                                 phba->sli4_hba.pc_sli4_params.supported = 1;
6182                                 break;
6183                         default:
6184                                 break;
6185                         }
6186                 }
6187                 /* Read the port's SLI4 Parameters capabilities if supported. */
6188                 if (phba->sli4_hba.pc_sli4_params.supported)
6189                         rc = lpfc_pc_sli4_params_get(phba, mboxq);
6190                 if (rc) {
6191                         mempool_free(mboxq, phba->mbox_mem_pool);
6192                         rc = -EIO;
6193                         goto out_free_bsmbx;
6194                 }
6195         }
6196
6197         /*
6198          * Get sli4 parameters that override parameters from Port capabilities.
6199          * If this call fails, it isn't critical unless the SLI4 parameters come
6200          * back in conflict.
6201          */
6202         rc = lpfc_get_sli4_parameters(phba, mboxq);
6203         if (rc) {
6204                 if_type = bf_get(lpfc_sli_intf_if_type,
6205                                  &phba->sli4_hba.sli_intf);
6206                 if_fam = bf_get(lpfc_sli_intf_sli_family,
6207                                 &phba->sli4_hba.sli_intf);
6208                 if (phba->sli4_hba.extents_in_use &&
6209                     phba->sli4_hba.rpi_hdrs_in_use) {
6210                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6211                                 "2999 Unsupported SLI4 Parameters "
6212                                 "Extents and RPI headers enabled.\n");
6213                         if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6214                             if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
6215                                 mempool_free(mboxq, phba->mbox_mem_pool);
6216                                 rc = -EIO;
6217                                 goto out_free_bsmbx;
6218                         }
6219                 }
6220                 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6221                       if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6222                         mempool_free(mboxq, phba->mbox_mem_pool);
6223                         rc = -EIO;
6224                         goto out_free_bsmbx;
6225                 }
6226         }
6227
6228         mempool_free(mboxq, phba->mbox_mem_pool);
6229
6230         /* Verify OAS is supported */
6231         lpfc_sli4_oas_verify(phba);
6232         if (phba->cfg_fof)
6233                 fof_vectors = 1;
6234
6235         /* Verify RAS support on adapter */
6236         lpfc_sli4_ras_init(phba);
6237
6238         /* Verify all the SLI4 queues */
6239         rc = lpfc_sli4_queue_verify(phba);
6240         if (rc)
6241                 goto out_free_bsmbx;
6242
6243         /* Create driver internal CQE event pool */
6244         rc = lpfc_sli4_cq_event_pool_create(phba);
6245         if (rc)
6246                 goto out_free_bsmbx;
6247
6248         /* Initialize sgl lists per host */
6249         lpfc_init_sgl_list(phba);
6250
6251         /* Allocate and initialize active sgl array */
6252         rc = lpfc_init_active_sgl_array(phba);
6253         if (rc) {
6254                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6255                                 "1430 Failed to initialize sgl list.\n");
6256                 goto out_destroy_cq_event_pool;
6257         }
6258         rc = lpfc_sli4_init_rpi_hdrs(phba);
6259         if (rc) {
6260                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6261                                 "1432 Failed to initialize rpi headers.\n");
6262                 goto out_free_active_sgl;
6263         }
6264
6265         /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
6266         longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
6267         phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
6268                                          GFP_KERNEL);
6269         if (!phba->fcf.fcf_rr_bmask) {
6270                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6271                                 "2759 Failed allocate memory for FCF round "
6272                                 "robin failover bmask\n");
6273                 rc = -ENOMEM;
6274                 goto out_remove_rpi_hdrs;
6275         }
6276
6277         phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
6278                                                 sizeof(struct lpfc_hba_eq_hdl),
6279                                                 GFP_KERNEL);
6280         if (!phba->sli4_hba.hba_eq_hdl) {
6281                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6282                                 "2572 Failed allocate memory for "
6283                                 "fast-path per-EQ handle array\n");
6284                 rc = -ENOMEM;
6285                 goto out_free_fcf_rr_bmask;
6286         }
6287
6288         phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
6289                                         sizeof(struct lpfc_vector_map_info),
6290                                         GFP_KERNEL);
6291         if (!phba->sli4_hba.cpu_map) {
6292                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6293                                 "3327 Failed allocate memory for msi-x "
6294                                 "interrupt vector mapping\n");
6295                 rc = -ENOMEM;
6296                 goto out_free_hba_eq_hdl;
6297         }
6298         if (lpfc_used_cpu == NULL) {
6299                 lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
6300                                                 GFP_KERNEL);
6301                 if (!lpfc_used_cpu) {
6302                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6303                                         "3335 Failed allocate memory for msi-x "
6304                                         "interrupt vector mapping\n");
6305                         kfree(phba->sli4_hba.cpu_map);
6306                         rc = -ENOMEM;
6307                         goto out_free_hba_eq_hdl;
6308                 }
6309                 for (i = 0; i < lpfc_present_cpu; i++)
6310                         lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
6311         }
6312
6313         /*
6314          * Enable sr-iov virtual functions if supported and configured
6315          * through the module parameter.
6316          */
6317         if (phba->cfg_sriov_nr_virtfn > 0) {
6318                 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
6319                                                  phba->cfg_sriov_nr_virtfn);
6320                 if (rc) {
6321                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6322                                         "3020 Requested number of SR-IOV "
6323                                         "virtual functions (%d) is not "
6324                                         "supported\n",
6325                                         phba->cfg_sriov_nr_virtfn);
6326                         phba->cfg_sriov_nr_virtfn = 0;
6327                 }
6328         }
6329
6330         return 0;
6331
6332 out_free_hba_eq_hdl:
6333         kfree(phba->sli4_hba.hba_eq_hdl);
6334 out_free_fcf_rr_bmask:
6335         kfree(phba->fcf.fcf_rr_bmask);
6336 out_remove_rpi_hdrs:
6337         lpfc_sli4_remove_rpi_hdrs(phba);
6338 out_free_active_sgl:
6339         lpfc_free_active_sgl(phba);
6340 out_destroy_cq_event_pool:
6341         lpfc_sli4_cq_event_pool_destroy(phba);
6342 out_free_bsmbx:
6343         lpfc_destroy_bootstrap_mbox(phba);
6344 out_free_mem:
6345         lpfc_mem_free(phba);
6346         return rc;
6347 }
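/*
 * The setup routine above relies on the kernel's goto-based unwind idiom:
 * each resource gets a label in the exit ladder, and a failure jumps to
 * the label that releases everything acquired so far, in reverse order of
 * acquisition.  A minimal sketch of the same shape (alloc_a/alloc_b are
 * hypothetical helpers, not part of this driver):
 *
 *     static int example_setup(struct lpfc_hba *phba)
 *     {
 *             int rc;
 *
 *             rc = alloc_a(phba);
 *             if (rc)
 *                     return rc;
 *             rc = alloc_b(phba);
 *             if (rc)
 *                     goto out_free_a;      // unwind only what succeeded
 *             return 0;
 *     out_free_a:
 *             free_a(phba);
 *             return rc;
 *     }
 */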
6348
6349 /**
6350  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
6351  * @phba: pointer to lpfc hba data structure.
6352  *
6353  * This routine is invoked to unset the driver internal resources set up
6354  * specifically to support the SLI-4 HBA device it is attached to.
6355  **/
6356 static void
6357 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
6358 {
6359         struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6360
6361         /* Free memory allocated for msi-x interrupt vector to CPU mapping */
6362         kfree(phba->sli4_hba.cpu_map);
6363         phba->sli4_hba.num_present_cpu = 0;
6364         phba->sli4_hba.num_online_cpu = 0;
6365         phba->sli4_hba.curr_disp_cpu = 0;
6366
6367         /* Free memory allocated for fast-path work queue handles */
6368         kfree(phba->sli4_hba.hba_eq_hdl);
6369
6370         /* Free the allocated rpi headers. */
6371         lpfc_sli4_remove_rpi_hdrs(phba);
6372         lpfc_sli4_remove_rpis(phba);
6373
6374         /* Free eligible FCF index bmask */
6375         kfree(phba->fcf.fcf_rr_bmask);
6376
6377         /* Free the ELS sgl list */
6378         lpfc_free_active_sgl(phba);
6379         lpfc_free_els_sgl_list(phba);
6380         lpfc_free_nvmet_sgl_list(phba);
6381
6382         /* Free the completion queue EQ event pool */
6383         lpfc_sli4_cq_event_release_all(phba);
6384         lpfc_sli4_cq_event_pool_destroy(phba);
6385
6386         /* Release resource identifiers. */
6387         lpfc_sli4_dealloc_resource_identifiers(phba);
6388
6389         /* Free the bsmbx region. */
6390         lpfc_destroy_bootstrap_mbox(phba);
6391
6392         /* Free the SLI Layer memory with SLI4 HBAs */
6393         lpfc_mem_free_all(phba);
6394
6395         /* Free the current connect table */
6396         list_for_each_entry_safe(conn_entry, next_conn_entry,
6397                 &phba->fcf_conn_rec_list, list) {
6398                 list_del_init(&conn_entry->list);
6399                 kfree(conn_entry);
6400         }
6401
6402         return;
6403 }
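/*
 * Note the use of list_for_each_entry_safe() when draining the connect
 * table above: each entry is unlinked and kfree()d during the walk, so
 * the iterator must cache the next node before the current one is freed.
 * The idiom in isolation (struct item is hypothetical):
 *
 *     struct item { struct list_head list; };
 *     struct item *it, *tmp;
 *
 *     list_for_each_entry_safe(it, tmp, &some_list, list) {
 *             list_del_init(&it->list);
 *             kfree(it);                    // safe: tmp already points past it
 *     }
 */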
6404
6405 /**
6406  * lpfc_init_api_table_setup - Set up init api function jump table
6407  * @phba: The hba struct for which this call is being executed.
6408  * @dev_grp: The HBA PCI-Device group number.
6409  *
6410  * This routine sets up the device INIT interface API function jump table
6411  * in @phba struct.
6412  *
6413  * Returns: 0 - success, -ENODEV - failure.
6414  **/
6415 int
6416 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6417 {
6418         phba->lpfc_hba_init_link = lpfc_hba_init_link;
6419         phba->lpfc_hba_down_link = lpfc_hba_down_link;
6420         phba->lpfc_selective_reset = lpfc_selective_reset;
6421         switch (dev_grp) {
6422         case LPFC_PCI_DEV_LP:
6423                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
6424                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
6425                 phba->lpfc_stop_port = lpfc_stop_port_s3;
6426                 break;
6427         case LPFC_PCI_DEV_OC:
6428                 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
6429                 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
6430                 phba->lpfc_stop_port = lpfc_stop_port_s4;
6431                 break;
6432         default:
6433                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6434                                 "1431 Invalid HBA PCI-device group: 0x%x\n",
6435                                 dev_grp);
6436                 return -ENODEV;
6438         }
6439         return 0;
6440 }
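/*
 * The routine above is one instance of the driver's jump-table scheme:
 * the SLI-3/SLI-4 callbacks are bound to @phba once, at probe time, so
 * hot paths can simply call phba->lpfc_stop_port(phba) without testing
 * the device group on every invocation.  A sketch of the dispatch with
 * illustrative (non-driver) names:
 *
 *     struct hba_ops {
 *             void (*stop_port)(struct lpfc_hba *phba);
 *     };
 *
 *     ops->stop_port = is_sli4 ? stop_port_s4 : stop_port_s3;  // bind once
 *     ops->stop_port(phba);         // dispatch without a revision check
 */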
6441
6442 /**
6443  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
6444  * @phba: pointer to lpfc hba data structure.
6445  *
6446  * This routine is invoked to set up the driver internal resources after the
6447  * device-specific resource setup to support the HBA device it is attached to.
6448  *
6449  * Return codes
6450  *      0 - successful
6451  *      other values - error
6452  **/
6453 static int
6454 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6455 {
6456         int error;
6457
6458         /* Startup the kernel thread for this host adapter. */
6459         phba->worker_thread = kthread_run(lpfc_do_work, phba,
6460                                           "lpfc_worker_%d", phba->brd_no);
6461         if (IS_ERR(phba->worker_thread)) {
6462                 error = PTR_ERR(phba->worker_thread);
6463                 return error;
6464         }
6465
6466         /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
6467         if (phba->sli_rev == LPFC_SLI_REV4)
6468                 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6469         else
6470                 phba->wq = NULL;
6471
6472         return 0;
6473 }
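/*
 * kthread_run() above encodes failure in the returned pointer itself, so
 * the result must be tested with IS_ERR() and decoded with PTR_ERR(),
 * never compared against NULL.  A minimal worker sketch (my_worker is
 * hypothetical, not the driver's lpfc_do_work):
 *
 *     static int my_worker(void *data)
 *     {
 *             while (!kthread_should_stop()) {
 *                     set_current_state(TASK_INTERRUPTIBLE);
 *                     schedule();           // sleep until woken or stopped
 *             }
 *             __set_current_state(TASK_RUNNING);
 *             return 0;
 *     }
 *
 *     task = kthread_run(my_worker, phba, "demo_%d", phba->brd_no);
 *     if (IS_ERR(task))
 *             return PTR_ERR(task);
 */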
6474
6475 /**
6476  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
6477  * @phba: pointer to lpfc hba data structure.
6478  *
6479  * This routine is invoked to unset the driver internal resources set up after
6480  * the device-specific resource setup for supporting the HBA device it is
6481  * attached to.
6482  **/
6483 static void
6484 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
6485 {
6486         if (phba->wq) {
6487                 flush_workqueue(phba->wq);
6488                 destroy_workqueue(phba->wq);
6489                 phba->wq = NULL;
6490         }
6491
6492         /* Stop kernel worker thread */
6493         if (phba->worker_thread)
6494                 kthread_stop(phba->worker_thread);
6495 }
6496
6497 /**
6498  * lpfc_free_iocb_list - Free iocb list.
6499  * @phba: pointer to lpfc hba data structure.
6500  *
6501  * This routine is invoked to free the driver's IOCB list and memory.
6502  **/
6503 void
6504 lpfc_free_iocb_list(struct lpfc_hba *phba)
6505 {
6506         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
6507
6508         spin_lock_irq(&phba->hbalock);
6509         list_for_each_entry_safe(iocbq_entry, iocbq_next,
6510                                  &phba->lpfc_iocb_list, list) {
6511                 list_del(&iocbq_entry->list);
6512                 kfree(iocbq_entry);
6513                 phba->total_iocbq_bufs--;
6514         }
6515         spin_unlock_irq(&phba->hbalock);
6516
6517         return;
6518 }
6519
6520 /**
6521  * lpfc_init_iocb_list - Allocate and initialize iocb list.
6522  * @phba: pointer to lpfc hba data structure.
6523  *
6524  * This routine is invoked to allocate and initialize the driver's IOCB
6525  * list and set up the IOCB tag array accordingly.
6526  *
6527  * Return codes
6528  *      0 - successful
6529  *      other values - error
6530  **/
6531 int
6532 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
6533 {
6534         struct lpfc_iocbq *iocbq_entry = NULL;
6535         uint16_t iotag;
6536         int i;
6537
6538         /* Initialize and populate the iocb list per host.  */
6539         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
6540         for (i = 0; i < iocb_count; i++) {
6541                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
6542                 if (iocbq_entry == NULL) {
6543                         printk(KERN_ERR "%s: only allocated %d iocbs of "
6544                                 "expected %d count. Unloading driver.\n",
6545                                 __func__, i, iocb_count);
6546                         goto out_free_iocbq;
6547                 }
6548
6549                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
6550                 if (iotag == 0) {
6551                         kfree(iocbq_entry);
6552                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
6553                                 "Unloading driver.\n", __func__);
6554                         goto out_free_iocbq;
6555                 }
6556                 iocbq_entry->sli4_lxritag = NO_XRI;
6557                 iocbq_entry->sli4_xritag = NO_XRI;
6558
6559                 spin_lock_irq(&phba->hbalock);
6560                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
6561                 phba->total_iocbq_bufs++;
6562                 spin_unlock_irq(&phba->hbalock);
6563         }
6564
6565         return 0;
6566
6567 out_free_iocbq:
6568         lpfc_free_iocb_list(phba);
6569
6570         return -ENOMEM;
6571 }
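/*
 * lpfc_init_iocb_list() is all-or-nothing: every iocbq is linked into
 * lpfc_iocb_list as soon as it is created, so a failure at any iteration
 * recovers by jumping to lpfc_free_iocb_list(), which frees whatever was
 * linked so far.  The shape of the pattern (hypothetical names):
 *
 *     for (i = 0; i < count; i++) {
 *             obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *             if (!obj)
 *                     goto undo;            // releases every obj linked so far
 *             list_add(&obj->list, &pool);
 *     }
 *     return 0;
 * undo:
 *     free_pool(&pool);
 *     return -ENOMEM;
 */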
6572
6573 /**
6574  * lpfc_free_sgl_list - Free a given sgl list.
6575  * @phba: pointer to lpfc hba data structure.
6576  * @sglq_list: pointer to the head of sgl list.
6577  *
6578  * This routine is invoked to free a given sgl list and memory.
6579  **/
6580 void
6581 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
6582 {
6583         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6584
6585         list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
6586                 list_del(&sglq_entry->list);
6587                 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
6588                 kfree(sglq_entry);
6589         }
6590 }
6591
6592 /**
6593  * lpfc_free_els_sgl_list - Free els sgl list.
6594  * @phba: pointer to lpfc hba data structure.
6595  *
6596  * This routine is invoked to free the driver's els sgl list and memory.
6597  **/
6598 static void
6599 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
6600 {
6601         LIST_HEAD(sglq_list);
6602
6603         /* Retrieve all els sgls from driver list */
6604         spin_lock_irq(&phba->hbalock);
6605         spin_lock(&phba->sli4_hba.sgl_list_lock);
6606         list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
6607         spin_unlock(&phba->sli4_hba.sgl_list_lock);
6608         spin_unlock_irq(&phba->hbalock);
6609
6610         /* Now free the sgl list */
6611         lpfc_free_sgl_list(phba, &sglq_list);
6612 }
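/*
 * The routine above uses the splice-then-free idiom: the shared list is
 * emptied onto a private, stack-local head while the locks are held, and
 * the expensive lpfc_mbuf_free()/kfree() work then runs with no lock
 * held, keeping the critical section short.  In isolation (lock and list
 * names assumed):
 *
 *     LIST_HEAD(tmp);
 *
 *     spin_lock_irq(&lock);
 *     list_splice_init(&shared_list, &tmp); // shared_list is now empty
 *     spin_unlock_irq(&lock);
 *
 *     free_everything_on(&tmp);             // tmp is private; no lock needed
 */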
6613
6614 /**
6615  * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
6616  * @phba: pointer to lpfc hba data structure.
6617  *
6618  * This routine is invoked to free the driver's nvmet sgl list and memory.
6619  **/
6620 static void
6621 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
6622 {
6623         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6624         LIST_HEAD(sglq_list);
6625
6626         /* Retrieve all nvmet sgls from driver list */
6627         spin_lock_irq(&phba->hbalock);
6628         spin_lock(&phba->sli4_hba.sgl_list_lock);
6629         list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
6630         spin_unlock(&phba->sli4_hba.sgl_list_lock);
6631         spin_unlock_irq(&phba->hbalock);
6632
6633         /* Now free the sgl list */
6634         list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
6635                 list_del(&sglq_entry->list);
6636                 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
6637                 kfree(sglq_entry);
6638         }
6639
6640         /* Update the nvmet_xri_cnt to reflect no current sgls.
6641          * The next initialization cycle sets the count and allocates
6642          * the sgls over again.
6643          */
6644         phba->sli4_hba.nvmet_xri_cnt = 0;
6645 }
6646
6647 /**
6648  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
6649  * @phba: pointer to lpfc hba data structure.
6650  *
6651  * This routine is invoked to allocate the driver's active sgl memory.
6652  * This array will hold the sglq_entry's for active IOs.
6653  **/
6654 static int
6655 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
6656 {
6657         int size;
6658         size = sizeof(struct lpfc_sglq *);
6659         size *= phba->sli4_hba.max_cfg_param.max_xri;
6660
6661         phba->sli4_hba.lpfc_sglq_active_list =
6662                 kzalloc(size, GFP_KERNEL);
6663         if (!phba->sli4_hba.lpfc_sglq_active_list)
6664                 return -ENOMEM;
6665         return 0;
6666 }
6667
6668 /**
6669  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
6670  * @phba: pointer to lpfc hba data structure.
6671  *
6672  * This routine is invoked to walk through the array of active sglq entries
6673  * and free all of the resources.
6674  * This is just a place holder for now.
6675  **/
6676 static void
6677 lpfc_free_active_sgl(struct lpfc_hba *phba)
6678 {
6679         kfree(phba->sli4_hba.lpfc_sglq_active_list);
6680 }
6681
6682 /**
6683  * lpfc_init_sgl_list - Allocate and initialize sgl list.
6684  * @phba: pointer to lpfc hba data structure.
6685  *
6686  * This routine is invoked to allocate and initialize the driver's sgl
6687  * list and set up the sgl xritag tag array accordingly.
6688  *
6689  **/
6690 static void
6691 lpfc_init_sgl_list(struct lpfc_hba *phba)
6692 {
6693         /* Initialize and populate the sglq list per host/VF. */
6694         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
6695         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
6696         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
6697         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
6698
6699         /* els xri-sgl book keeping */
6700         phba->sli4_hba.els_xri_cnt = 0;
6701
6702         /* scsi xri-buffer book keeping */
6703         phba->sli4_hba.scsi_xri_cnt = 0;
6704
6705         /* nvme xri-buffer book keeping */
6706         phba->sli4_hba.nvme_xri_cnt = 0;
6707 }
6708
6709 /**
6710  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
6711  * @phba: pointer to lpfc hba data structure.
6712  *
6713  * This routine is invoked to post rpi header templates to the
6714  * port for those SLI4 ports that do not support extents.  This routine
6715  * posts a PAGE_SIZE memory region to the port to hold up to
6716  * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
6717  * and should be called only when interrupts are disabled.
6718  *
6719  * Return codes
6720  *      0 - successful
6721  *      -ERROR - otherwise.
6722  **/
6723 int
6724 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
6725 {
6726         int rc = 0;
6727         struct lpfc_rpi_hdr *rpi_hdr;
6728
6729         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
6730         if (!phba->sli4_hba.rpi_hdrs_in_use)
6731                 return rc;
6732         if (phba->sli4_hba.extents_in_use)
6733                 return -EIO;
6734
6735         rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
6736         if (!rpi_hdr) {
6737                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6738                                 "0391 Error during rpi post operation\n");
6739                 lpfc_sli4_remove_rpis(phba);
6740                 rc = -ENODEV;
6741         }
6742
6743         return rc;
6744 }
6745
6746 /**
6747  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
6748  * @phba: pointer to lpfc hba data structure.
6749  *
6750  * This routine is invoked to allocate a single 4KB memory region to
6751  * support rpis and store it in the phba.  This single region
6752  * provides support for up to 64 rpis.  The region is used globally
6753  * by the device.
6754  *
6755  * Returns:
6756  *   A valid rpi hdr on success.
6757  *   A NULL pointer on any failure.
6758  **/
6759 struct lpfc_rpi_hdr *
6760 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6761 {
6762         uint16_t rpi_limit, curr_rpi_range;
6763         struct lpfc_dmabuf *dmabuf;
6764         struct lpfc_rpi_hdr *rpi_hdr;
6765
6766         /*
6767          * If the SLI4 port supports extents, posting the rpi header isn't
6768          * required.  Set the expected maximum count and let the actual value
6769          * get set when extents are fully allocated.
6770          */
6771         if (!phba->sli4_hba.rpi_hdrs_in_use)
6772                 return NULL;
6773         if (phba->sli4_hba.extents_in_use)
6774                 return NULL;
6775
6776         /* The limit on the logical index is just the max_rpi count. */
6777         rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
6778
6779         spin_lock_irq(&phba->hbalock);
6780         /*
6781          * Establish the starting RPI in this header block.  The starting
6782          * rpi is normalized to a zero base because the physical rpi is
6783          * port based.
6784          */
6785         curr_rpi_range = phba->sli4_hba.next_rpi;
6786         spin_unlock_irq(&phba->hbalock);
6787
6788         /* Reached full RPI range */
6789         if (curr_rpi_range == rpi_limit)
6790                 return NULL;
6791
6792         /*
6793          * First allocate the protocol header region for the port.  The
6794          * port expects a 4KB DMA-mapped memory region that is 4K aligned.
6795          */
6796         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6797         if (!dmabuf)
6798                 return NULL;
6799
6800         dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
6801                                            LPFC_HDR_TEMPLATE_SIZE,
6802                                            &dmabuf->phys, GFP_KERNEL);
6803         if (!dmabuf->virt) {
6804                 rpi_hdr = NULL;
6805                 goto err_free_dmabuf;
6806         }
6807
6808         if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
6809                 rpi_hdr = NULL;
6810                 goto err_free_coherent;
6811         }
6812
6813         /* Save the rpi header data for cleanup later. */
6814         rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
6815         if (!rpi_hdr)
6816                 goto err_free_coherent;
6817
6818         rpi_hdr->dmabuf = dmabuf;
6819         rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
6820         rpi_hdr->page_count = 1;
6821         spin_lock_irq(&phba->hbalock);
6822
6823         /* The rpi_hdr stores the logical index only. */
6824         rpi_hdr->start_rpi = curr_rpi_range;
6825         rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
6826         list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
6827
6828         spin_unlock_irq(&phba->hbalock);
6829         return rpi_hdr;
6830
6831  err_free_coherent:
6832         dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
6833                           dmabuf->virt, dmabuf->phys);
6834  err_free_dmabuf:
6835         kfree(dmabuf);
6836         return NULL;
6837 }
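/*
 * Beyond the usual NULL check, lpfc_sli4_create_rpi_hdr() must also
 * verify a port-imposed alignment: dma_zalloc_coherent() returns a
 * physically contiguous buffer, but the SLI4 port additionally requires
 * the bus address to be aligned to the template size, hence the explicit
 * IS_ALIGNED() test with a free-on-misalignment path.  Sketched with an
 * illustrative 4KB size:
 *
 *     virt = dma_zalloc_coherent(dev, SZ_4K, &phys, GFP_KERNEL);
 *     if (!virt)
 *             return NULL;
 *     if (!IS_ALIGNED(phys, SZ_4K)) {       // port cannot use this region
 *             dma_free_coherent(dev, SZ_4K, virt, phys);
 *             return NULL;
 *     }
 */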
6838
6839 /**
6840  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
6841  * @phba: pointer to lpfc hba data structure.
6842  *
6843  * This routine is invoked to remove all memory resources allocated
6844  * to support rpis for SLI4 ports not supporting extents. This routine
6845  * presumes the caller has released all rpis consumed by fabric or port
6846  * logins and is prepared to have the header pages removed.
6847  **/
6848 void
6849 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
6850 {
6851         struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
6852
6853         if (!phba->sli4_hba.rpi_hdrs_in_use)
6854                 goto exit;
6855
6856         list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
6857                                  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6858                 list_del(&rpi_hdr->list);
6859                 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
6860                                   rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
6861                 kfree(rpi_hdr->dmabuf);
6862                 kfree(rpi_hdr);
6863         }
6864  exit:
6865         /* There are no rpis available to the port now. */
6866         phba->sli4_hba.next_rpi = 0;
6867 }
6868
6869 /**
6870  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
6871  * @pdev: pointer to pci device data structure.
6872  *
6873  * This routine is invoked to allocate the driver hba data structure for an
6874  * HBA device. If the allocation is successful, a reference to the PCI
6875  * device data structure is set in the phba.
6876  *
6877  * Return codes
6878  *      pointer to @phba - successful
6879  *      NULL - error
6880  **/
6881 static struct lpfc_hba *
6882 lpfc_hba_alloc(struct pci_dev *pdev)
6883 {
6884         struct lpfc_hba *phba;
6885
6886         /* Allocate memory for HBA structure */
6887         phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
6888         if (!phba) {
6889                 dev_err(&pdev->dev, "failed to allocate hba struct\n");
6890                 return NULL;
6891         }
6892
6893         /* Set reference to PCI device in HBA structure */
6894         phba->pcidev = pdev;
6895
6896         /* Assign an unused board number */
6897         phba->brd_no = lpfc_get_instance();
6898         if (phba->brd_no < 0) {
6899                 kfree(phba);
6900                 return NULL;
6901         }
6902         phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
6903
6904         spin_lock_init(&phba->ct_ev_lock);
6905         INIT_LIST_HEAD(&phba->ct_ev_waiters);
6906
6907         return phba;
6908 }
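/*
 * The board number handed out by lpfc_get_instance() above comes from
 * the driver-wide lpfc_hba_index IDR and is given back by idr_remove()
 * in lpfc_hba_free() below.  A sketch of that pairing, assuming the
 * usual idr_alloc() usage (the helper body is not shown in this file):
 *
 *     id = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
 *     if (id < 0)
 *             return NULL;                  // no free instance number
 *     ...
 *     idr_remove(&lpfc_hba_index, id);      // on teardown
 */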
6909
6910 /**
6911  * lpfc_hba_free - Free driver hba data structure with a device.
6912  * @phba: pointer to lpfc hba data structure.
6913  *
6914  * This routine is invoked to free the driver hba data structure associated
6915  * with an HBA device.
6916  **/
6917 static void
6918 lpfc_hba_free(struct lpfc_hba *phba)
6919 {
6920         /* Release the driver assigned board number */
6921         idr_remove(&lpfc_hba_index, phba->brd_no);
6922
6923         /* Free memory allocated with sli3 rings */
6924         kfree(phba->sli.sli3_ring);
6925         phba->sli.sli3_ring = NULL;
6926
6927         kfree(phba);
6928         return;
6929 }
6930
6931 /**
6932  * lpfc_create_shost - Create hba physical port with associated scsi host.
6933  * @phba: pointer to lpfc hba data structure.
6934  *
6935  * This routine is invoked to create HBA physical port and associate a SCSI
6936  * host with it.
6937  *
6938  * Return codes
6939  *      0 - successful
6940  *      other values - error
6941  **/
6942 static int
6943 lpfc_create_shost(struct lpfc_hba *phba)
6944 {
6945         struct lpfc_vport *vport;
6946         struct Scsi_Host  *shost;
6947
6948         /* Initialize HBA FC structure */
6949         phba->fc_edtov = FF_DEF_EDTOV;
6950         phba->fc_ratov = FF_DEF_RATOV;
6951         phba->fc_altov = FF_DEF_ALTOV;
6952         phba->fc_arbtov = FF_DEF_ARBTOV;
6953
6954         atomic_set(&phba->sdev_cnt, 0);
6955         atomic_set(&phba->fc4ScsiInputRequests, 0);
6956         atomic_set(&phba->fc4ScsiOutputRequests, 0);
6957         atomic_set(&phba->fc4ScsiControlRequests, 0);
6958         atomic_set(&phba->fc4ScsiIoCmpls, 0);
6959         vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
6960         if (!vport)
6961                 return -ENODEV;
6962
6963         shost = lpfc_shost_from_vport(vport);
6964         phba->pport = vport;
6965
6966         if (phba->nvmet_support) {
6967                 /* Only 1 vport (pport) will support NVME target */
6968                 if (phba->txrdy_payload_pool == NULL) {
6969                         phba->txrdy_payload_pool = dma_pool_create(
6970                                 "txrdy_pool", &phba->pcidev->dev,
6971                                 TXRDY_PAYLOAD_LEN, 16, 0);
6972                         if (phba->txrdy_payload_pool) {
6973                                 phba->targetport = NULL;
6974                                 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
6975                                 lpfc_printf_log(phba, KERN_INFO,
6976                                                 LOG_INIT | LOG_NVME_DISC,
6977                                                 "6076 NVME Target Found\n");
6978                         }
6979                 }
6980         }
6981
6982         lpfc_debugfs_initialize(vport);
6983         /* Put reference to SCSI host to driver's device private data */
6984         pci_set_drvdata(phba->pcidev, shost);
6985
6986         /*
6987          * At this point we are fully registered with PSA. In addition,
6988          * any initial discovery should be completed.
6989          */
6990         vport->load_flag |= FC_ALLOW_FDMI;
6991         if (phba->cfg_enable_SmartSAN ||
6992             (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
6993
6994                 /* Setup appropriate attribute masks */
6995                 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
6996                 if (phba->cfg_enable_SmartSAN)
6997                         vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
6998                 else
6999                         vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
7000         }
7001         return 0;
7002 }
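/*
 * The txrdy pool created above is a dma_pool: a slab-like allocator for
 * many small, equal-sized DMA-coherent buffers, here TXRDY payloads with
 * 16-byte alignment.  Typical lifetime of such a pool, sketched with
 * assumed names:
 *
 *     pool = dma_pool_create("demo_pool", &pdev->dev, len, 16, 0);
 *     buf = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
 *     ...
 *     dma_pool_free(pool, buf, dma_handle);
 *     dma_pool_destroy(pool);
 */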
7003
7004 /**
7005  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
7006  * @phba: pointer to lpfc hba data structure.
7007  *
7008  * This routine is invoked to destroy HBA physical port and the associated
7009  * SCSI host.
7010  **/
7011 static void
7012 lpfc_destroy_shost(struct lpfc_hba *phba)
7013 {
7014         struct lpfc_vport *vport = phba->pport;
7015
7016         /* Destroy physical port that associated with the SCSI host */
7017         destroy_port(vport);
7018
7019         return;
7020 }
7021
7022 /**
7023  * lpfc_setup_bg - Setup Block guard structures and debug areas.
7024  * @phba: pointer to lpfc hba data structure.
7025  * @shost: the shost to be used to detect Block guard settings.
7026  *
7027  * This routine sets up the local Block guard protocol settings for @shost.
7028  * This routine also allocates memory for debugging bg buffers.
7029  **/
7030 static void
7031 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7032 {
7033         uint32_t old_mask;
7034         uint32_t old_guard;
7035
7036         int pagecnt = 10;
7037         if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7038                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7039                                 "1478 Registering BlockGuard with the "
7040                                 "SCSI layer\n");
7041
7042                 old_mask = phba->cfg_prot_mask;
7043                 old_guard = phba->cfg_prot_guard;
7044
7045                 /* Only allow supported values */
7046                 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
7047                         SHOST_DIX_TYPE0_PROTECTION |
7048                         SHOST_DIX_TYPE1_PROTECTION);
7049                 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
7050                                          SHOST_DIX_GUARD_CRC);
7051
7052                 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
7053                 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
7054                         phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
7055
7056                 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7057                         if ((old_mask != phba->cfg_prot_mask) ||
7058                                 (old_guard != phba->cfg_prot_guard))
7059                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7060                                         "1475 Registering BlockGuard with the "
7061                                         "SCSI layer: mask %d  guard %d\n",
7062                                         phba->cfg_prot_mask,
7063                                         phba->cfg_prot_guard);
7064
7065                         scsi_host_set_prot(shost, phba->cfg_prot_mask);
7066                         scsi_host_set_guard(shost, phba->cfg_prot_guard);
7067                 } else
7068                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7069                                 "1479 Not Registering BlockGuard with the SCSI "
7070                                 "layer, Bad protection parameters: %d %d\n",
7071                                 old_mask, old_guard);
7072         }
7073
7074         if (!_dump_buf_data) {
7075                 while (pagecnt) {
7076                         spin_lock_init(&_dump_buf_lock);
7077                         _dump_buf_data =
7078                                 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7079                         if (_dump_buf_data) {
7080                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7081                                         "9043 BLKGRD: allocated %d pages for "
7082                                        "_dump_buf_data at 0x%p\n",
7083                                        (1 << pagecnt), _dump_buf_data);
7084                                 _dump_buf_data_order = pagecnt;
7085                                 memset(_dump_buf_data, 0,
7086                                        ((1 << PAGE_SHIFT) << pagecnt));
7087                                 break;
7088                         } else
7089                                 --pagecnt;
7090                 }
7091                 if (!_dump_buf_data_order)
7092                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7093                                 "9044 BLKGRD: ERROR unable to allocate "
7094                                "memory for hexdump\n");
7095         } else
7096                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7097                         "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
7098                        "\n", _dump_buf_data);
7099         if (!_dump_buf_dif) {
7100                 while (pagecnt) {
7101                         _dump_buf_dif =
7102                                 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7103                         if (_dump_buf_dif) {
7104                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7105                                         "9046 BLKGRD: allocated %d pages for "
7106                                        "_dump_buf_dif at 0x%p\n",
7107                                        (1 << pagecnt), _dump_buf_dif);
7108                                 _dump_buf_dif_order = pagecnt;
7109                                 memset(_dump_buf_dif, 0,
7110                                        ((1 << PAGE_SHIFT) << pagecnt));
7111                                 break;
7112                         } else
7113                                 --pagecnt;
7114                 }
7115                 if (!_dump_buf_dif_order)
7116                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7117                                 "9047 BLKGRD: ERROR unable to allocate "
7118                                 "memory for hexdump\n");
7119         } else
7120                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7121                         "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
7122                        _dump_buf_dif);
7123 }
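/*
 * Both dump buffers above are sized opportunistically: __get_free_pages()
 * takes an order (2^order pages), so the loops retry with a decreasing
 * order until an allocation succeeds, trading capacity for reliability.
 * The fallback in isolation (order 10 is about 4MB with 4KB pages):
 *
 *     int order = 10;
 *     unsigned long buf = 0;
 *
 *     while (order && !(buf = __get_free_pages(GFP_KERNEL, order)))
 *             order--;
 *     // buf now spans 2^order pages, or is 0 if every order down to 1 failed
 */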
7124
7125 /**
7126  * lpfc_post_init_setup - Perform necessary device post initialization setup.
7127  * @phba: pointer to lpfc hba data structure.
7128  *
7129  * This routine is invoked to perform all the necessary post initialization
7130  * setup for the device.
7131  **/
7132 static void
7133 lpfc_post_init_setup(struct lpfc_hba *phba)
7134 {
7135         struct Scsi_Host  *shost;
7136         struct lpfc_adapter_event_header adapter_event;
7137
7138         /* Get the default values for Model Name and Description */
7139         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
7140
7141         /*
7142          * hba setup may have changed the hba_queue_depth so we need to
7143          * adjust the value of can_queue.
7144          */
7145         shost = pci_get_drvdata(phba->pcidev);
7146         shost->can_queue = phba->cfg_hba_queue_depth - 10;
7147         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
7148                 lpfc_setup_bg(phba, shost);
7149
7150         lpfc_host_attrib_init(shost);
7151
7152         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7153                 spin_lock_irq(shost->host_lock);
7154                 lpfc_poll_start_timer(phba);
7155                 spin_unlock_irq(shost->host_lock);
7156         }
7157
7158         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7159                         "0428 Perform SCSI scan\n");
7160         /* Send board arrival event to upper layer */
7161         adapter_event.event_type = FC_REG_ADAPTER_EVENT;
7162         adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
7163         fc_host_post_vendor_event(shost, fc_get_event_number(),
7164                                   sizeof(adapter_event),
7165                                   (char *) &adapter_event,
7166                                   LPFC_NL_VENDOR_ID);
7167         return;
7168 }
7169
7170 /**
7171  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
7172  * @phba: pointer to lpfc hba data structure.
7173  *
7174  * This routine is invoked to set up the PCI device memory space for a
7175  * device with the SLI-3 interface spec.
7176  *
7177  * Return codes
7178  *      0 - successful
7179  *      other values - error
7180  **/
7181 static int
7182 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7183 {
7184         struct pci_dev *pdev;
7185         unsigned long bar0map_len, bar2map_len;
7186         int i, hbq_count;
7187         void *ptr;
7188         int error = -ENODEV;
7189
7190         /* Obtain PCI device reference */
7191         if (!phba->pcidev)
7192                 return error;
7193         else
7194                 pdev = phba->pcidev;
7195
7196         /* Set the device DMA mask size */
7197         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
7198          || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
7199                 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
7200                  || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
7201                         return error;
7202                 }
7203         }
7204
7205         /* Get the bus address of Bar0 and Bar2 and the number of bytes
7206          * required by each mapping.
7207          */
7208         phba->pci_bar0_map = pci_resource_start(pdev, 0);
7209         bar0map_len = pci_resource_len(pdev, 0);
7210
7211         phba->pci_bar2_map = pci_resource_start(pdev, 2);
7212         bar2map_len = pci_resource_len(pdev, 2);
7213
7214         /* Map HBA SLIM to a kernel virtual address. */
7215         phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
7216         if (!phba->slim_memmap_p) {
7217                 dev_printk(KERN_ERR, &pdev->dev,
7218                            "ioremap failed for SLIM memory.\n");
7219                 goto out;
7220         }
7221
7222         /* Map HBA Control Registers to a kernel virtual address. */
7223         phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
7224         if (!phba->ctrl_regs_memmap_p) {
7225                 dev_printk(KERN_ERR, &pdev->dev,
7226                            "ioremap failed for HBA control registers.\n");
7227                 goto out_iounmap_slim;
7228         }
7229
7230         /* Allocate memory for SLI-2 structures */
7231         phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7232                                                 &phba->slim2p.phys, GFP_KERNEL);
7233         if (!phba->slim2p.virt)
7234                 goto out_iounmap;
7235
7236         phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
7237         phba->mbox_ext = (phba->slim2p.virt +
7238                 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
7239         phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
7240         phba->IOCBs = (phba->slim2p.virt +
7241                        offsetof(struct lpfc_sli2_slim, IOCBs));
7242
7243         phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
7244                                                  lpfc_sli_hbq_size(),
7245                                                  &phba->hbqslimp.phys,
7246                                                  GFP_KERNEL);
7247         if (!phba->hbqslimp.virt)
7248                 goto out_free_slim;
7249
7250         hbq_count = lpfc_sli_hbq_count();
7251         ptr = phba->hbqslimp.virt;
7252         for (i = 0; i < hbq_count; ++i) {
7253                 phba->hbqs[i].hbq_virt = ptr;
7254                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
7255                 ptr += (lpfc_hbq_defs[i]->entry_count *
7256                         sizeof(struct lpfc_hbq_entry));
7257         }
7258         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
7259         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
7260
7261         memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
7262
7263         phba->MBslimaddr = phba->slim_memmap_p;
7264         phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
7265         phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
7266         phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
7267         phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
7268
7269         return 0;
7270
7271 out_free_slim:
7272         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7273                           phba->slim2p.virt, phba->slim2p.phys);
7274 out_iounmap:
7275         iounmap(phba->ctrl_regs_memmap_p);
7276 out_iounmap_slim:
7277         iounmap(phba->slim_memmap_p);
7278 out:
7279         return error;
7280 }
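/*
 * The DMA-mask negotiation at the top of this routine is the standard
 * 64-then-32 fallback: ask for the widest addressing the driver can use
 * and retreat to 32-bit masks if the platform rejects it.  In the PCI
 * API of this kernel generation:
 *
 *     if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
 *         pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 *             if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
 *                 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
 *                     return -EIO;          // no usable DMA addressing
 *     }
 */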
7281
7282 /**
7283  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
7284  * @phba: pointer to lpfc hba data structure.
7285  *
7286  * This routine is invoked to unset the PCI device memory space for a
7287  * device with the SLI-3 interface spec.
7288  **/
7289 static void
7290 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
7291 {
7292         struct pci_dev *pdev;
7293
7294         /* Obtain PCI device reference */
7295         if (!phba->pcidev)
7296                 return;
7297         else
7298                 pdev = phba->pcidev;
7299
7300         /* Free coherent DMA memory allocated */
7301         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
7302                           phba->hbqslimp.virt, phba->hbqslimp.phys);
7303         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7304                           phba->slim2p.virt, phba->slim2p.phys);
7305
7306         /* I/O memory unmap */
7307         iounmap(phba->ctrl_regs_memmap_p);
7308         iounmap(phba->slim_memmap_p);
7309
7310         return;
7311 }
7312
7313 /**
7314  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
7315  * @phba: pointer to lpfc hba data structure.
7316  *
7317  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
7318  * done and check status.
7319  *
7320  * Return 0 if successful, otherwise -ENODEV.
7321  **/
7322 int
7323 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
7324 {
7325         struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
7326         struct lpfc_register reg_data;
7327         int i, port_error = 0;
7328         uint32_t if_type;
7329
7330         memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
7331         memset(&reg_data, 0, sizeof(reg_data));
7332         if (!phba->sli4_hba.PSMPHRregaddr)
7333                 return -ENODEV;
7334
7335         /* Wait up to 30 seconds for the SLI Port POST done and ready */
7336         for (i = 0; i < 3000; i++) {
7337                 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
7338                         &portsmphr_reg.word0) ||
7339                         (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
7340                         /* Port has a fatal POST error, break out */
7341                         port_error = -ENODEV;
7342                         break;
7343                 }
7344                 if (LPFC_POST_STAGE_PORT_READY ==
7345                     bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
7346                         break;
7347                 msleep(10);
7348         }
7349
7350         /*
7351          * If there was a port error during POST, then don't proceed with
7352          * other register reads as the data may not be valid.  Just exit.
7353          */
7354         if (port_error) {
7355                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7356                         "1408 Port Failed POST - portsmphr=0x%x, "
7357                         "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
7358                         "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
7359                         portsmphr_reg.word0,
7360                         bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
7361                         bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
7362                         bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
7363                         bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
7364                         bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
7365                         bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
7366                         bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
7367                         bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
7368         } else {
7369                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7370                                 "2534 Device Info: SLIFamily=0x%x, "
7371                                 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
7372                                 "SLIHint_2=0x%x, FT=0x%x\n",
7373                                 bf_get(lpfc_sli_intf_sli_family,
7374                                        &phba->sli4_hba.sli_intf),
7375                                 bf_get(lpfc_sli_intf_slirev,
7376                                        &phba->sli4_hba.sli_intf),
7377                                 bf_get(lpfc_sli_intf_if_type,
7378                                        &phba->sli4_hba.sli_intf),
7379                                 bf_get(lpfc_sli_intf_sli_hint1,
7380                                        &phba->sli4_hba.sli_intf),
7381                                 bf_get(lpfc_sli_intf_sli_hint2,
7382                                        &phba->sli4_hba.sli_intf),
7383                                 bf_get(lpfc_sli_intf_func_type,
7384                                        &phba->sli4_hba.sli_intf));
7385                 /*
7386                  * Check for other Port errors during the initialization
7387                  * process.  Fail the load if the port did not come up
7388                  * correctly.
7389                  */
7390                 if_type = bf_get(lpfc_sli_intf_if_type,
7391                                  &phba->sli4_hba.sli_intf);
7392                 switch (if_type) {
7393                 case LPFC_SLI_INTF_IF_TYPE_0:
7394                         phba->sli4_hba.ue_mask_lo =
7395                               readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
7396                         phba->sli4_hba.ue_mask_hi =
7397                               readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
7398                         uerrlo_reg.word0 =
7399                               readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
7400                         uerrhi_reg.word0 =
7401                                 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
7402                         if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
7403                             (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7404                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7405                                                 "1422 Unrecoverable Error "
7406                                                 "Detected during POST "
7407                                                 "uerr_lo_reg=0x%x, "
7408                                                 "uerr_hi_reg=0x%x, "
7409                                                 "ue_mask_lo_reg=0x%x, "
7410                                                 "ue_mask_hi_reg=0x%x\n",
7411                                                 uerrlo_reg.word0,
7412                                                 uerrhi_reg.word0,
7413                                                 phba->sli4_hba.ue_mask_lo,
7414                                                 phba->sli4_hba.ue_mask_hi);
7415                                 port_error = -ENODEV;
7416                         }
7417                         break;
7418                 case LPFC_SLI_INTF_IF_TYPE_2:
7419                 case LPFC_SLI_INTF_IF_TYPE_6:
7420                         /* Final checks.  The port status should be clean. */
7421                         if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7422                                 &reg_data.word0) ||
7423                                 (bf_get(lpfc_sliport_status_err, &reg_data) &&
7424                                  !bf_get(lpfc_sliport_status_rn, &reg_data))) {
7425                                 phba->work_status[0] =
7426                                         readl(phba->sli4_hba.u.if_type2.
7427                                               ERR1regaddr);
7428                                 phba->work_status[1] =
7429                                         readl(phba->sli4_hba.u.if_type2.
7430                                               ERR2regaddr);
7431                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7432                                         "2888 Unrecoverable port error "
7433                                         "following POST: port status reg "
7434                                         "0x%x, port_smphr reg 0x%x, "
7435                                         "error 1=0x%x, error 2=0x%x\n",
7436                                         reg_data.word0,
7437                                         portsmphr_reg.word0,
7438                                         phba->work_status[0],
7439                                         phba->work_status[1]);
7440                                 port_error = -ENODEV;
7441                         }
7442                         break;
7443                 case LPFC_SLI_INTF_IF_TYPE_1:
7444                 default:
7445                         break;
7446                 }
7447         }
7448         return port_error;
7449 }
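/*
 * The POST wait above is a bounded polling loop: 3000 passes of
 * msleep(10) give the advertised 30-second ceiling, and every pass both
 * re-reads the semaphore register and checks the fatal-error bit so a
 * dead port fails fast instead of consuming the whole timeout.  The
 * generic shape (predicate names assumed):
 *
 *     for (i = 0; i < 3000; i++) {          // 3000 * 10ms = 30s
 *             status = read_status();
 *             if (fatal_error(status))
 *                     return -ENODEV;       // bail out early
 *             if (ready(status))
 *                     break;
 *             msleep(10);
 *     }
 */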
7450
7451 /**
7452  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
7453  * @phba: pointer to lpfc hba data structure.
7454  * @if_type:  The SLI4 interface type getting configured.
7455  *
7456  * This routine is invoked to set up SLI4 BAR0 PCI config space register
7457  * memory map.
7458  **/
7459 static void
7460 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7461 {
7462         switch (if_type) {
7463         case LPFC_SLI_INTF_IF_TYPE_0:
7464                 phba->sli4_hba.u.if_type0.UERRLOregaddr =
7465                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
7466                 phba->sli4_hba.u.if_type0.UERRHIregaddr =
7467                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
7468                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
7469                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
7470                 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
7471                         phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
7472                 phba->sli4_hba.SLIINTFregaddr =
7473                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7474                 break;
7475         case LPFC_SLI_INTF_IF_TYPE_2:
7476                 phba->sli4_hba.u.if_type2.EQDregaddr =
7477                         phba->sli4_hba.conf_regs_memmap_p +
7478                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
7479                 phba->sli4_hba.u.if_type2.ERR1regaddr =
7480                         phba->sli4_hba.conf_regs_memmap_p +
7481                                                 LPFC_CTL_PORT_ER1_OFFSET;
7482                 phba->sli4_hba.u.if_type2.ERR2regaddr =
7483                         phba->sli4_hba.conf_regs_memmap_p +
7484                                                 LPFC_CTL_PORT_ER2_OFFSET;
7485                 phba->sli4_hba.u.if_type2.CTRLregaddr =
7486                         phba->sli4_hba.conf_regs_memmap_p +
7487                                                 LPFC_CTL_PORT_CTL_OFFSET;
7488                 phba->sli4_hba.u.if_type2.STATUSregaddr =
7489                         phba->sli4_hba.conf_regs_memmap_p +
7490                                                 LPFC_CTL_PORT_STA_OFFSET;
7491                 phba->sli4_hba.SLIINTFregaddr =
7492                         phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
7493                 phba->sli4_hba.PSMPHRregaddr =
7494                         phba->sli4_hba.conf_regs_memmap_p +
7495                                                 LPFC_CTL_PORT_SEM_OFFSET;
7496                 phba->sli4_hba.RQDBregaddr =
7497                         phba->sli4_hba.conf_regs_memmap_p +
7498                                                 LPFC_ULP0_RQ_DOORBELL;
7499                 phba->sli4_hba.WQDBregaddr =
7500                         phba->sli4_hba.conf_regs_memmap_p +
7501                                                 LPFC_ULP0_WQ_DOORBELL;
7502                 phba->sli4_hba.CQDBregaddr =
7503                         phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
7504                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
7505                 phba->sli4_hba.MQDBregaddr =
7506                         phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
7507                 phba->sli4_hba.BMBXregaddr =
7508                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
7509                 break;
7510         case LPFC_SLI_INTF_IF_TYPE_6:
7511                 phba->sli4_hba.u.if_type2.EQDregaddr =
7512                         phba->sli4_hba.conf_regs_memmap_p +
7513                                                 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
7514                 phba->sli4_hba.u.if_type2.ERR1regaddr =
7515                         phba->sli4_hba.conf_regs_memmap_p +
7516                                                 LPFC_CTL_PORT_ER1_OFFSET;
7517                 phba->sli4_hba.u.if_type2.ERR2regaddr =
7518                         phba->sli4_hba.conf_regs_memmap_p +
7519                                                 LPFC_CTL_PORT_ER2_OFFSET;
7520                 phba->sli4_hba.u.if_type2.CTRLregaddr =
7521                         phba->sli4_hba.conf_regs_memmap_p +
7522                                                 LPFC_CTL_PORT_CTL_OFFSET;
7523                 phba->sli4_hba.u.if_type2.STATUSregaddr =
7524                         phba->sli4_hba.conf_regs_memmap_p +
7525                                                 LPFC_CTL_PORT_STA_OFFSET;
7526                 phba->sli4_hba.PSMPHRregaddr =
7527                         phba->sli4_hba.conf_regs_memmap_p +
7528                                                 LPFC_CTL_PORT_SEM_OFFSET;
7529                 phba->sli4_hba.BMBXregaddr =
7530                         phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
7531                 break;
7532         case LPFC_SLI_INTF_IF_TYPE_1:
7533         default:
7534                 dev_printk(KERN_ERR, &phba->pcidev->dev,
7535                            "FATAL - unsupported SLI4 interface type - %d\n",
7536                            if_type);
7537                 break;
7538         }
7539 }
7540
7541 /**
7542  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
7543  * @phba: pointer to lpfc hba data structure.
7544  *
7545  * This routine is invoked to set up SLI4 BAR1 register memory map.
7546  **/
7547 static void
7548 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
7549 {
7550         switch (if_type) {
7551         case LPFC_SLI_INTF_IF_TYPE_0:
7552                 phba->sli4_hba.PSMPHRregaddr =
7553                         phba->sli4_hba.ctrl_regs_memmap_p +
7554                         LPFC_SLIPORT_IF0_SMPHR;
7555                 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7556                         LPFC_HST_ISR0;
7557                 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7558                         LPFC_HST_IMR0;
7559                 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
7560                         LPFC_HST_ISCR0;
7561                 break;
7562         case LPFC_SLI_INTF_IF_TYPE_6:
7563                 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
7564                         LPFC_IF6_RQ_DOORBELL;
7565                 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
7566                         LPFC_IF6_WQ_DOORBELL;
7567                 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
7568                         LPFC_IF6_CQ_DOORBELL;
7569                 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
7570                         LPFC_IF6_EQ_DOORBELL;
7571                 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
7572                         LPFC_IF6_MQ_DOORBELL;
7573                 break;
7574         case LPFC_SLI_INTF_IF_TYPE_2:
7575         case LPFC_SLI_INTF_IF_TYPE_1:
7576         default:
7577                 dev_err(&phba->pcidev->dev,
7578                            "FATAL - unsupported SLI4 interface type - %d\n",
7579                            if_type);
7580                 break;
7581         }
7582 }
7583
7584 /**
7585  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
7586  * @phba: pointer to lpfc hba data structure.
7587  * @vf: virtual function number
7588  *
7589  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
7590  * based on the given virtual function number, @vf.
7591  *
7592  * Return 0 if successful, otherwise -ENODEV.
7593  **/
7594 static int
7595 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
7596 {
7597         if (vf > LPFC_VIR_FUNC_MAX)
7598                 return -ENODEV;
7599
7600         phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7601                                 vf * LPFC_VFR_PAGE_SIZE +
7602                                         LPFC_ULP0_RQ_DOORBELL);
7603         phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7604                                 vf * LPFC_VFR_PAGE_SIZE +
7605                                         LPFC_ULP0_WQ_DOORBELL);
7606         phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7607                                 vf * LPFC_VFR_PAGE_SIZE +
7608                                         LPFC_EQCQ_DOORBELL);
7609         phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
7610         phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7611                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
7612         phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
7613                                 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
7614         return 0;
7615 }
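     /*
      * Editorial example (illustrative, not in the original source): each
      * virtual function owns one LPFC_VFR_PAGE_SIZE-sized window of
      * doorbell registers.  Assuming, say, LPFC_VFR_PAGE_SIZE were 0x1000,
      * vf == 2 would place the RQ doorbell at
      * drbl_regs_memmap_p + 2 * 0x1000 + LPFC_ULP0_RQ_DOORBELL.
      */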
7616
7617 /**
7618  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
7619  * @phba: pointer to lpfc hba data structure.
7620  *
7621  * This routine is invoked to create the bootstrap mailbox
7622  * region consistent with the SLI-4 interface spec.  This
7623  * routine allocates all memory necessary to communicate
7624  * mailbox commands to the port and sets up all alignment
7625  * needs.  No locks are expected to be held when calling
7626  * this routine.
7627  *
7628  * Return codes
7629  *      0 - successful
7630  *      -ENOMEM - could not allocate memory.
7631  **/
7632 static int
7633 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
7634 {
7635         uint32_t bmbx_size;
7636         struct lpfc_dmabuf *dmabuf;
7637         struct dma_address *dma_address;
7638         uint32_t pa_addr;
7639         uint64_t phys_addr;
7640
7641         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
7642         if (!dmabuf)
7643                 return -ENOMEM;
7644
7645         /*
7646          * The bootstrap mailbox region consists of two parts,
7647          * plus padding to satisfy a 16-byte alignment restriction.
7648          */
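              /*
               * Worked example (illustrative): if sizeof(struct lpfc_bmbx_create)
               * were 256 bytes, bmbx_size would be 256 + 15 = 271.  The extra
               * LPFC_ALIGN_16_BYTE - 1 bytes of slack guarantee that PTR_ALIGN()
               * and ALIGN() below can always carve a 16-byte aligned region of
               * the full structure size out of this allocation.
               */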
7649         bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
7650         dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
7651                                            &dmabuf->phys, GFP_KERNEL);
7652         if (!dmabuf->virt) {
7653                 kfree(dmabuf);
7654                 return -ENOMEM;
7655         }
7656
7657         /*
7658          * Initialize the bootstrap mailbox pointers now so that the register
7659          * operations are simple later.  The mailbox dma address is required
7660          * to be 16-byte aligned.  Also align the virtual memory as each
7661          * mailbox is copied into the bmbx mailbox region before issuing the
7662          * command to the port.
7663          */
7664         phba->sli4_hba.bmbx.dmabuf = dmabuf;
7665         phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
7666
7667         phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
7668                                               LPFC_ALIGN_16_BYTE);
7669         phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
7670                                               LPFC_ALIGN_16_BYTE);
7671
7672         /*
7673          * Set the high and low physical addresses now.  The SLI4 alignment
7674          * requirement is 16 bytes and the mailbox is posted to the port
7675          * as two 30-bit addresses.  The other data is a bit marking whether
7676          * the 30-bit address is the high or low address.
7677          * Upcast bmbx aphys to 64 bits so the shift instruction compiles
7678          * cleanly on 32-bit machines.
7679          */
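              /*
               * Worked example (illustrative values, not from the source):
               * for aphys = 0x123456780, addr_lo carries bits 33:4
               * ((aphys >> 4) & 0x3fffffff) and addr_hi carries bits 63:34
               * ((aphys >> 34) & 0x3fffffff); each word is shifted left by 2
               * and tagged with its hi/lo marker bit before being written to
               * the bootstrap mailbox register.
               */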
7680         dma_address = &phba->sli4_hba.bmbx.dma_address;
7681         phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
7682         pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
7683         dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
7684                                            LPFC_BMBX_BIT1_ADDR_HI);
7685
7686         pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
7687         dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
7688                                            LPFC_BMBX_BIT1_ADDR_LO);
7689         return 0;
7690 }
7691
7692 /**
7693  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
7694  * @phba: pointer to lpfc hba data structure.
7695  *
7696  * This routine is invoked to teardown the bootstrap mailbox
7697  * region and release all host resources. This routine requires
7698  * the caller to ensure all mailbox commands have been recovered, that no
7699  * additional mailbox commands are sent, and that interrupts are disabled
7700  * before calling this routine.
7701  *
7702  **/
7703 static void
7704 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
7705 {
7706         dma_free_coherent(&phba->pcidev->dev,
7707                           phba->sli4_hba.bmbx.bmbx_size,
7708                           phba->sli4_hba.bmbx.dmabuf->virt,
7709                           phba->sli4_hba.bmbx.dmabuf->phys);
7710
7711         kfree(phba->sli4_hba.bmbx.dmabuf);
7712         memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
7713 }
7714
7715 /**
7716  * lpfc_sli4_read_config - Get the config parameters.
7717  * @phba: pointer to lpfc hba data structure.
7718  *
7719  * This routine is invoked to read the configuration parameters from the HBA.
7720  * The configuration parameters are used to set the base and maximum values
7721  * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
7722  * allocation for the port.
7723  *
7724  * Return codes
7725  *      0 - successful
7726  *      -ENOMEM - No available memory
7727  *      -EIO - The mailbox failed to complete successfully.
7728  **/
7729 int
7730 lpfc_sli4_read_config(struct lpfc_hba *phba)
7731 {
7732         LPFC_MBOXQ_t *pmb;
7733         struct lpfc_mbx_read_config *rd_config;
7734         union  lpfc_sli4_cfg_shdr *shdr;
7735         uint32_t shdr_status, shdr_add_status;
7736         struct lpfc_mbx_get_func_cfg *get_func_cfg;
7737         struct lpfc_rsrc_desc_fcfcoe *desc;
7738         char *pdesc_0;
7739         uint16_t forced_link_speed;
7740         uint32_t if_type;
7741         int length, i, rc = 0, rc2;
7742
7743         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7744         if (!pmb) {
7745                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7746                                 "2011 Unable to allocate memory for issuing "
7747                                 "READ_CONFIG mailbox command\n");
7748                 return -ENOMEM;
7749         }
7750
7751         lpfc_read_config(phba, pmb);
7752
7753         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7754         if (rc != MBX_SUCCESS) {
7755                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7756                         "2012 Mailbox failed, mbxCmd x%x "
7757                         "READ_CONFIG, mbxStatus x%x\n",
7758                         bf_get(lpfc_mqe_command, &pmb->u.mqe),
7759                         bf_get(lpfc_mqe_status, &pmb->u.mqe));
7760                 rc = -EIO;
7761         } else {
7762                 rd_config = &pmb->u.mqe.un.rd_config;
7763                 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
7764                         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
7765                         phba->sli4_hba.lnk_info.lnk_tp =
7766                                 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
7767                         phba->sli4_hba.lnk_info.lnk_no =
7768                                 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
7769                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7770                                         "3081 lnk_type:%d, lnk_numb:%d\n",
7771                                         phba->sli4_hba.lnk_info.lnk_tp,
7772                                         phba->sli4_hba.lnk_info.lnk_no);
7773                 } else
7774                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7775                                         "3082 Mailbox (x%x) returned ldv:x0\n",
7776                                         bf_get(lpfc_mqe_command, &pmb->u.mqe));
7777                 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
7778                         phba->bbcredit_support = 1;
7779                         phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
7780                 }
7781
7782                 phba->sli4_hba.extents_in_use =
7783                         bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
7784                 phba->sli4_hba.max_cfg_param.max_xri =
7785                         bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
7786                 phba->sli4_hba.max_cfg_param.xri_base =
7787                         bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
7788                 phba->sli4_hba.max_cfg_param.max_vpi =
7789                         bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
7790                 phba->sli4_hba.max_cfg_param.vpi_base =
7791                         bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
7792                 phba->sli4_hba.max_cfg_param.max_rpi =
7793                         bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
7794                 phba->sli4_hba.max_cfg_param.rpi_base =
7795                         bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
7796                 phba->sli4_hba.max_cfg_param.max_vfi =
7797                         bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
7798                 phba->sli4_hba.max_cfg_param.vfi_base =
7799                         bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
7800                 phba->sli4_hba.max_cfg_param.max_fcfi =
7801                         bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
7802                 phba->sli4_hba.max_cfg_param.max_eq =
7803                         bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
7804                 phba->sli4_hba.max_cfg_param.max_rq =
7805                         bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
7806                 phba->sli4_hba.max_cfg_param.max_wq =
7807                         bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
7808                 phba->sli4_hba.max_cfg_param.max_cq =
7809                         bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
7810                 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
7811                 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
7812                 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
7813                 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
7814                 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
7815                                 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
7816                 phba->max_vports = phba->max_vpi;
7817                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7818                                 "2003 cfg params Extents? %d "
7819                                 "XRI(B:%d M:%d), "
7820                                 "VPI(B:%d M:%d) "
7821                                 "VFI(B:%d M:%d) "
7822                                 "RPI(B:%d M:%d) "
7823                                 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
7824                                 phba->sli4_hba.extents_in_use,
7825                                 phba->sli4_hba.max_cfg_param.xri_base,
7826                                 phba->sli4_hba.max_cfg_param.max_xri,
7827                                 phba->sli4_hba.max_cfg_param.vpi_base,
7828                                 phba->sli4_hba.max_cfg_param.max_vpi,
7829                                 phba->sli4_hba.max_cfg_param.vfi_base,
7830                                 phba->sli4_hba.max_cfg_param.max_vfi,
7831                                 phba->sli4_hba.max_cfg_param.rpi_base,
7832                                 phba->sli4_hba.max_cfg_param.max_rpi,
7833                                 phba->sli4_hba.max_cfg_param.max_fcfi,
7834                                 phba->sli4_hba.max_cfg_param.max_eq,
7835                                 phba->sli4_hba.max_cfg_param.max_cq,
7836                                 phba->sli4_hba.max_cfg_param.max_wq,
7837                                 phba->sli4_hba.max_cfg_param.max_rq);
7838
7839                 /*
7840                  * Calculate NVME queue resources based on how
7841                  * many WQ/CQs are available.
7842                  */
7843                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7844                         length = phba->sli4_hba.max_cfg_param.max_wq;
7845                         if (phba->sli4_hba.max_cfg_param.max_cq <
7846                             phba->sli4_hba.max_cfg_param.max_wq)
7847                                 length = phba->sli4_hba.max_cfg_param.max_cq;
7848
7849                         /*
7850                          * What's left after this can go toward NVME.
7851                          * The minus 6 accounts for ELS, NVME LS, MBOX,
7852                          * FOF, plus a couple extra. When configured for
7853                          * NVMET, FCP io channel WQs are not created.
7854                          */
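                              /*
                               * Illustrative arithmetic (values assumed, not
                               * from the source): with max_wq = 64, max_cq = 64
                               * and cfg_fcp_io_channel = 4 on a non-NVMET port,
                               * length = 64 - 6 - 4 = 54 WQ/CQ pairs remain
                               * for NVME IO channels.
                               */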
7855                         length -= 6;
7856                         if (!phba->nvmet_support)
7857                                 length -= phba->cfg_fcp_io_channel;
7858
7859                         if (phba->cfg_nvme_io_channel > length) {
7860                                 lpfc_printf_log(
7861                                         phba, KERN_ERR, LOG_SLI,
7862                                         "2005 Reducing NVME IO channel to %d: "
7863                                         "WQ %d CQ %d NVMEIO %d FCPIO %d\n",
7864                                         length,
7865                                         phba->sli4_hba.max_cfg_param.max_wq,
7866                                         phba->sli4_hba.max_cfg_param.max_cq,
7867                                         phba->cfg_nvme_io_channel,
7868                                         phba->cfg_fcp_io_channel);
7869
7870                                 phba->cfg_nvme_io_channel = length;
7871                         }
7872                 }
7873         }
7874
7875         if (rc)
7876                 goto read_cfg_out;
7877
7878         /* Update link speed if forced link speed is supported */
7879         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7880         if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
7881                 forced_link_speed =
7882                         bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
7883                 if (forced_link_speed) {
7884                         phba->hba_flag |= HBA_FORCED_LINK_SPEED;
7885
7886                         switch (forced_link_speed) {
7887                         case LINK_SPEED_1G:
7888                                 phba->cfg_link_speed =
7889                                         LPFC_USER_LINK_SPEED_1G;
7890                                 break;
7891                         case LINK_SPEED_2G:
7892                                 phba->cfg_link_speed =
7893                                         LPFC_USER_LINK_SPEED_2G;
7894                                 break;
7895                         case LINK_SPEED_4G:
7896                                 phba->cfg_link_speed =
7897                                         LPFC_USER_LINK_SPEED_4G;
7898                                 break;
7899                         case LINK_SPEED_8G:
7900                                 phba->cfg_link_speed =
7901                                         LPFC_USER_LINK_SPEED_8G;
7902                                 break;
7903                         case LINK_SPEED_10G:
7904                                 phba->cfg_link_speed =
7905                                         LPFC_USER_LINK_SPEED_10G;
7906                                 break;
7907                         case LINK_SPEED_16G:
7908                                 phba->cfg_link_speed =
7909                                         LPFC_USER_LINK_SPEED_16G;
7910                                 break;
7911                         case LINK_SPEED_32G:
7912                                 phba->cfg_link_speed =
7913                                         LPFC_USER_LINK_SPEED_32G;
7914                                 break;
7915                         case LINK_SPEED_64G:
7916                                 phba->cfg_link_speed =
7917                                         LPFC_USER_LINK_SPEED_64G;
7918                                 break;
7919                         case 0xffff:
7920                                 phba->cfg_link_speed =
7921                                         LPFC_USER_LINK_SPEED_AUTO;
7922                                 break;
7923                         default:
7924                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7925                                                 "0047 Unrecognized link "
7926                                                 "speed: %d\n",
7927                                                 forced_link_speed);
7928                                 phba->cfg_link_speed =
7929                                         LPFC_USER_LINK_SPEED_AUTO;
7930                         }
7931                 }
7932         }
7933
7934         /* Reset the DFT_HBA_Q_DEPTH to the max xri */
7935         length = phba->sli4_hba.max_cfg_param.max_xri -
7936                         lpfc_sli4_get_els_iocb_cnt(phba);
7937         if (phba->cfg_hba_queue_depth > length) {
7938                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7939                                 "3361 HBA queue depth changed from %d to %d\n",
7940                                 phba->cfg_hba_queue_depth, length);
7941                 phba->cfg_hba_queue_depth = length;
7942         }
7943
7944         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
7945             LPFC_SLI_INTF_IF_TYPE_2)
7946                 goto read_cfg_out;
7947
7948         /* get the pf# and vf# for SLI4 if_type 2 port */
7949         length = (sizeof(struct lpfc_mbx_get_func_cfg) -
7950                   sizeof(struct lpfc_sli4_cfg_mhdr));
7951         lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
7952                          LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
7953                          length, LPFC_SLI4_MBX_EMBED);
7954
7955         rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7956         shdr = (union lpfc_sli4_cfg_shdr *)
7957                                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7958         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7959         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7960         if (rc2 || shdr_status || shdr_add_status) {
7961                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7962                                 "3026 Mailbox failed, mbxCmd x%x "
7963                                 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
7964                                 bf_get(lpfc_mqe_command, &pmb->u.mqe),
7965                                 bf_get(lpfc_mqe_status, &pmb->u.mqe));
7966                 goto read_cfg_out;
7967         }
7968
7969         /* search for fc_fcoe resource descriptor */
7970         get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
7971
7972         pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
7973         desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
7974         length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
7975         if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
7976                 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
7977         else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
7978                 goto read_cfg_out;
7979
7980         for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
7981                 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
7982                 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
7983                     bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
7984                         phba->sli4_hba.iov.pf_number =
7985                                 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
7986                         phba->sli4_hba.iov.vf_number =
7987                                 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
7988                         break;
7989                 }
7990         }
7991
7992         if (i < LPFC_RSRC_DESC_MAX_NUM)
7993                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7994                                 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
7995                                 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
7996                                 phba->sli4_hba.iov.vf_number);
7997         else
7998                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7999                                 "3028 GET_FUNCTION_CONFIG: failed to find "
8000                                 "Resource Descriptor:x%x\n",
8001                                 LPFC_RSRC_DESC_TYPE_FCFCOE);
8002
8003 read_cfg_out:
8004         mempool_free(pmb, phba->mbox_mem_pool);
8005         return rc;
8006 }
8007
8008 /**
8009  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
8010  * @phba: pointer to lpfc hba data structure.
8011  *
8012  * This routine is invoked to set up the port-side endian order when
8013  * the port if_type is 0.  This routine has no function for other
8014  * if_types.
8015  *
8016  * Return codes
8017  *      0 - successful
8018  *      -ENOMEM - No available memory
8019  *      -EIO - The mailbox failed to complete successfully.
8020  **/
8021 static int
8022 lpfc_setup_endian_order(struct lpfc_hba *phba)
8023 {
8024         LPFC_MBOXQ_t *mboxq;
8025         uint32_t if_type, rc = 0;
8026         uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
8027                                       HOST_ENDIAN_HIGH_WORD1};
8028
8029         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8030         switch (if_type) {
8031         case LPFC_SLI_INTF_IF_TYPE_0:
8032                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8033                                                        GFP_KERNEL);
8034                 if (!mboxq) {
8035                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8036                                         "0492 Unable to allocate memory for "
8037                                         "issuing SLI_CONFIG_SPECIAL mailbox "
8038                                         "command\n");
8039                         return -ENOMEM;
8040                 }
8041
8042                 /*
8043                  * The SLI4_CONFIG_SPECIAL mailbox command requires the first
8044                  * two words to contain special data values and no other data.
8045                  */
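                     /*
                      * Editorial note: HOST_ENDIAN_LOW_WORD0/HIGH_WORD1 are
                      * fixed byte patterns; the port presumably infers the
                      * host's byte order from how the pattern arrives, so the
                      * two words must be copied into the mailbox unmodified.
                      */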
8046                 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
8047                 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
8048                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8049                 if (rc != MBX_SUCCESS) {
8050                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8051                                         "0493 SLI_CONFIG_SPECIAL mailbox "
8052                                         "failed with status x%x\n",
8053                                         rc);
8054                         rc = -EIO;
8055                 }
8056                 mempool_free(mboxq, phba->mbox_mem_pool);
8057                 break;
8058         case LPFC_SLI_INTF_IF_TYPE_6:
8059         case LPFC_SLI_INTF_IF_TYPE_2:
8060         case LPFC_SLI_INTF_IF_TYPE_1:
8061         default:
8062                 break;
8063         }
8064         return rc;
8065 }
8066
8067 /**
8068  * lpfc_sli4_queue_verify - Verify and update EQ counts
8069  * @phba: pointer to lpfc hba data structure.
8070  *
8071  * This routine is invoked to check the user settable queue counts for EQs.
8072  * After this routine is called the counts will be set to valid values that
8073  * adhere to the constraints of the system's interrupt vectors and the port's
8074  * queue resources.
8075  *
8076  * Return codes
8077  *      0 - successful
8079  **/
8080 static int
8081 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8082 {
8083         int io_channel;
8084         int fof_vectors = phba->cfg_fof ? 1 : 0;
8085
8086         /*
8087          * Sanity check for configured queue parameters against the run-time
8088          * device parameters
8089          */
8090
8091         /* Sanity check on HBA EQ parameters */
8092         io_channel = phba->io_channel_irqs;
8093
8094         if (phba->sli4_hba.num_online_cpu < io_channel) {
8095                 lpfc_printf_log(phba,
8096                                 KERN_ERR, LOG_INIT,
8097                                 "3188 Reducing IO channels to match number of "
8098                                 "online CPUs: from %d to %d\n",
8099                                 io_channel, phba->sli4_hba.num_online_cpu);
8100                 io_channel = phba->sli4_hba.num_online_cpu;
8101         }
8102
8103         if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
8104                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8105                                 "2575 Reducing IO channels to match number of "
8106                                 "available EQs: from %d to %d\n",
8107                                 io_channel,
8108                                 phba->sli4_hba.max_cfg_param.max_eq);
8109                 io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
8110         }
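              /*
               * Worked example (values assumed): with io_channel_irqs = 32,
               * 16 online CPUs, max_eq = 12 and one FOF vector, the first
               * check above reduces io_channel to 16 and the second to
               * 12 - 1 = 11.
               */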
8111
8112         /* The actual number of FCP / NVME event queues adopted */
8113         if (io_channel != phba->io_channel_irqs)
8114                 phba->io_channel_irqs = io_channel;
8115         if (phba->cfg_fcp_io_channel > io_channel)
8116                 phba->cfg_fcp_io_channel = io_channel;
8117         if (phba->cfg_nvme_io_channel > io_channel)
8118                 phba->cfg_nvme_io_channel = io_channel;
8119         if (phba->nvmet_support) {
8120                 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
8121                         phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
8122         }
8123         if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8124                 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
8125
8126         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8127                         "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
8128                         phba->io_channel_irqs, phba->cfg_fcp_io_channel,
8129                         phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
8130
8131         /* Get EQ depth from module parameter, fake the default for now */
8132         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8133         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8134
8135         /* Get CQ depth from module parameter, fake the default for now */
8136         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8137         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8138         return 0;
8139 }
8140
8141 static int
8142 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
8143 {
8144         struct lpfc_queue *qdesc;
8145
8146         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8147                                       phba->sli4_hba.cq_esize,
8148                                       LPFC_CQE_EXP_COUNT);
8149         if (!qdesc) {
8150                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8151                                 "0508 Failed allocate fast-path NVME CQ (%d)\n",
8152                                 wqidx);
8153                 return 1;
8154         }
8155         qdesc->qe_valid = 1;
8156         phba->sli4_hba.nvme_cq[wqidx] = qdesc;
8157
8158         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8159                                       LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
8160         if (!qdesc) {
8161                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8162                                 "0509 Failed allocate fast-path NVME WQ (%d)\n",
8163                                 wqidx);
8164                 return 1;
8165         }
8166         phba->sli4_hba.nvme_wq[wqidx] = qdesc;
8167         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8168         return 0;
8169 }
8170
8171 static int
8172 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8173 {
8174         struct lpfc_queue *qdesc;
8175         uint32_t wqesize;
8176
8177         /* Create Fast Path FCP CQs */
8178         if (phba->enab_exp_wqcq_pages) {
8179                 /* Increase the CQ size when WQEs contain an embedded cdb */
8180                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8181                                               phba->sli4_hba.cq_esize,
8182                                               LPFC_CQE_EXP_COUNT);
8183         } else {
8184                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8185                                               phba->sli4_hba.cq_esize,
8186                                               phba->sli4_hba.cq_ecount);
8187         }
8188         if (!qdesc) {
8189                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8190                         "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
8191                 return 1;
8192         }
8193         qdesc->qe_valid = 1;
8194         phba->sli4_hba.fcp_cq[wqidx] = qdesc;
8195
8196         /* Create Fast Path FCP WQs */
8197         if (phba->enab_exp_wqcq_pages) {
8198                 /* Increase the WQ size when WQEs contain an embedded cdb */
8199                 wqesize = (phba->fcp_embed_io) ?
8200                         LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
8201                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8202                                               wqesize,
8203                                               LPFC_WQE_EXP_COUNT);
8204         } else {
8205                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8206                                               phba->sli4_hba.wq_esize,
8207                                               phba->sli4_hba.wq_ecount);
8208         }
8209         if (!qdesc) {
8210                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8211                                 "0503 Failed allocate fast-path FCP WQ (%d)\n",
8212                                 wqidx);
8213                 return 1;
8214         }
8215         phba->sli4_hba.fcp_wq[wqidx] = qdesc;
8216         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8217         return 0;
8218 }
8219
8220 /**
8221  * lpfc_sli4_queue_create - Create all the SLI4 queues
8222  * @phba: pointer to lpfc hba data structure.
8223  *
8224  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
8225  * operation. For each SLI4 queue type, the parameters such as queue entry
8226  * count (queue depth) shall be taken from the module parameter. For now,
8227  * we just use some constant number as a placeholder.
8228  *
8229  * Return codes
8230  *      0 - successful
8231  *      -ENOMEM - No available memory
8232  *      -ERANGE - No IO channels configured
8233  **/
8234 int
8235 lpfc_sli4_queue_create(struct lpfc_hba *phba)
8236 {
8237         struct lpfc_queue *qdesc;
8238         int idx, io_channel;
8239
8240         /*
8241          * Create HBA Record arrays.
8242          * Both NVME and FCP will share the same vectors / EQs.
8243          */
8244         io_channel = phba->io_channel_irqs;
8245         if (!io_channel)
8246                 return -ERANGE;
8247
8248         phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
8249         phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
8250         phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
8251         phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
8252         phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
8253         phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
8254         phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
8255         phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
8256         phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
8257         phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
8258
8259         phba->sli4_hba.hba_eq =  kcalloc(io_channel,
8260                                         sizeof(struct lpfc_queue *),
8261                                         GFP_KERNEL);
8262         if (!phba->sli4_hba.hba_eq) {
8263                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8264                         "2576 Failed allocate memory for "
8265                         "fast-path EQ record array\n");
8266                 goto out_error;
8267         }
8268
8269         if (phba->cfg_fcp_io_channel) {
8270                 phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
8271                                                 sizeof(struct lpfc_queue *),
8272                                                 GFP_KERNEL);
8273                 if (!phba->sli4_hba.fcp_cq) {
8274                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8275                                         "2577 Failed allocate memory for "
8276                                         "fast-path CQ record array\n");
8277                         goto out_error;
8278                 }
8279                 phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
8280                                                 sizeof(struct lpfc_queue *),
8281                                                 GFP_KERNEL);
8282                 if (!phba->sli4_hba.fcp_wq) {
8283                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8284                                         "2578 Failed allocate memory for "
8285                                         "fast-path FCP WQ record array\n");
8286                         goto out_error;
8287                 }
8288                 /*
8289                  * Since the first EQ can have multiple CQs associated with it,
8290                  * this array is used to quickly see if we have a FCP fast-path
8291                  * CQ match.
8292                  */
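                     /*
                      * Illustrative use (assumption, not shown in this file):
                      * a fast-path completion handler can compare a CQ id
                      * against fcp_cq_map[idx] to identify the owning FCP CQ
                      * without dereferencing the queue structures.
                      */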
8293                 phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
8294                                                         sizeof(uint16_t),
8295                                                         GFP_KERNEL);
8296                 if (!phba->sli4_hba.fcp_cq_map) {
8297                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8298                                         "2545 Failed allocate memory for "
8299                                         "fast-path CQ map\n");
8300                         goto out_error;
8301                 }
8302         }
8303
8304         if (phba->cfg_nvme_io_channel) {
8305                 phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
8306                                                 sizeof(struct lpfc_queue *),
8307                                                 GFP_KERNEL);
8308                 if (!phba->sli4_hba.nvme_cq) {
8309                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8310                                         "6077 Failed allocate memory for "
8311                                         "fast-path CQ record array\n");
8312                         goto out_error;
8313                 }
8314
8315                 phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
8316                                                 sizeof(struct lpfc_queue *),
8317                                                 GFP_KERNEL);
8318                 if (!phba->sli4_hba.nvme_wq) {
8319                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8320                                         "2581 Failed allocate memory for "
8321                                         "fast-path NVME WQ record array\n");
8322                         goto out_error;
8323                 }
8324
8325                 /*
8326                  * Since the first EQ can have multiple CQs associated with it,
8327                  * this array is used to quickly see if we have a NVME fast-path
8328                  * CQ match.
8329                  */
8330                 phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
8331                                                         sizeof(uint16_t),
8332                                                         GFP_KERNEL);
8333                 if (!phba->sli4_hba.nvme_cq_map) {
8334                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8335                                         "6078 Failed allocate memory for "
8336                                         "fast-path CQ map\n");
8337                         goto out_error;
8338                 }
8339
8340                 if (phba->nvmet_support) {
8341                         phba->sli4_hba.nvmet_cqset = kcalloc(
8342                                         phba->cfg_nvmet_mrq,
8343                                         sizeof(struct lpfc_queue *),
8344                                         GFP_KERNEL);
8345                         if (!phba->sli4_hba.nvmet_cqset) {
8346                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8347                                         "3121 Failed allocate memory for "
8348                                         "fast-path CQ set array\n");
8349                                 goto out_error;
8350                         }
8351                         phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
8352                                         phba->cfg_nvmet_mrq,
8353                                         sizeof(struct lpfc_queue *),
8354                                         GFP_KERNEL);
8355                         if (!phba->sli4_hba.nvmet_mrq_hdr) {
8356                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8357                                         "3122 Failed allocate memory for "
8358                                         "fast-path RQ set hdr array\n");
8359                                 goto out_error;
8360                         }
8361                         phba->sli4_hba.nvmet_mrq_data = kcalloc(
8362                                         phba->cfg_nvmet_mrq,
8363                                         sizeof(struct lpfc_queue *),
8364                                         GFP_KERNEL);
8365                         if (!phba->sli4_hba.nvmet_mrq_data) {
8366                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8367                                         "3124 Failed allocate memory for "
8368                                         "fast-path RQ set data array\n");
8369                                 goto out_error;
8370                         }
8371                 }
8372         }
8373
8374         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8375
8376         /* Create HBA Event Queues (EQs) */
8377         for (idx = 0; idx < io_channel; idx++) {
8378                 /* Create EQs */
8379                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8380                                               phba->sli4_hba.eq_esize,
8381                                               phba->sli4_hba.eq_ecount);
8382                 if (!qdesc) {
8383                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8384                                         "0497 Failed allocate EQ (%d)\n", idx);
8385                         goto out_error;
8386                 }
8387                 qdesc->qe_valid = 1;
8388                 phba->sli4_hba.hba_eq[idx] = qdesc;
8389         }
8390
8391         /* FCP and NVME io channels are not required to be balanced */
8392
8393         for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8394                 if (lpfc_alloc_fcp_wq_cq(phba, idx))
8395                         goto out_error;
8396
8397         for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
8398                 if (lpfc_alloc_nvme_wq_cq(phba, idx))
8399                         goto out_error;
8400
8401         if (phba->nvmet_support) {
8402                 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8403                         qdesc = lpfc_sli4_queue_alloc(phba,
8404                                                       LPFC_DEFAULT_PAGE_SIZE,
8405                                                       phba->sli4_hba.cq_esize,
8406                                                       phba->sli4_hba.cq_ecount);
8407                         if (!qdesc) {
8408                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8409                                         "3142 Failed allocate NVME "
8410                                         "CQ Set (%d)\n", idx);
8411                                 goto out_error;
8412                         }
8413                         qdesc->qe_valid = 1;
8414                         phba->sli4_hba.nvmet_cqset[idx] = qdesc;
8415                 }
8416         }
8417
8418         /*
8419          * Create Slow Path Completion Queues (CQs)
8420          */
8421
8422         /* Create slow-path Mailbox Command Complete Queue */
8423         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8424                                       phba->sli4_hba.cq_esize,
8425                                       phba->sli4_hba.cq_ecount);
8426         if (!qdesc) {
8427                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8428                                 "0500 Failed allocate slow-path mailbox CQ\n");
8429                 goto out_error;
8430         }
8431         qdesc->qe_valid = 1;
8432         phba->sli4_hba.mbx_cq = qdesc;
8433
8434         /* Create slow-path ELS Complete Queue */
8435         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8436                                       phba->sli4_hba.cq_esize,
8437                                       phba->sli4_hba.cq_ecount);
8438         if (!qdesc) {
8439                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8440                                 "0501 Failed allocate slow-path ELS CQ\n");
8441                 goto out_error;
8442         }
8443         qdesc->qe_valid = 1;
8444         phba->sli4_hba.els_cq = qdesc;
8445
8446
8447         /*
8448          * Create Slow Path Work Queues (WQs)
8449          */
8450
8451         /* Create Mailbox Command Queue */
8452
8453         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8454                                       phba->sli4_hba.mq_esize,
8455                                       phba->sli4_hba.mq_ecount);
8456         if (!qdesc) {
8457                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8458                                 "0505 Failed allocate slow-path MQ\n");
8459                 goto out_error;
8460         }
8461         phba->sli4_hba.mbx_wq = qdesc;
8462
8463         /*
8464          * Create ELS Work Queues
8465          */
8466
8467         /* Create slow-path ELS Work Queue */
8468         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8469                                       phba->sli4_hba.wq_esize,
8470                                       phba->sli4_hba.wq_ecount);
8471         if (!qdesc) {
8472                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8473                                 "0504 Failed allocate slow-path ELS WQ\n");
8474                 goto out_error;
8475         }
8476         phba->sli4_hba.els_wq = qdesc;
8477         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8478
8479         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8480                 /* Create NVME LS Complete Queue */
8481                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8482                                               phba->sli4_hba.cq_esize,
8483                                               phba->sli4_hba.cq_ecount);
8484                 if (!qdesc) {
8485                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8486                                         "6079 Failed allocate NVME LS CQ\n");
8487                         goto out_error;
8488                 }
8489                 qdesc->qe_valid = 1;
8490                 phba->sli4_hba.nvmels_cq = qdesc;
8491
8492                 /* Create NVME LS Work Queue */
8493                 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8494                                               phba->sli4_hba.wq_esize,
8495                                               phba->sli4_hba.wq_ecount);
8496                 if (!qdesc) {
8497                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8498                                         "6080 Failed allocate NVME LS WQ\n");
8499                         goto out_error;
8500                 }
8501                 phba->sli4_hba.nvmels_wq = qdesc;
8502                 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8503         }
8504
8505         /*
8506          * Create Receive Queue (RQ)
8507          */
8508
8509         /* Create Receive Queue for header */
8510         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8511                                       phba->sli4_hba.rq_esize,
8512                                       phba->sli4_hba.rq_ecount);
8513         if (!qdesc) {
8514                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8515                                 "0506 Failed allocate receive HRQ\n");
8516                 goto out_error;
8517         }
8518         phba->sli4_hba.hdr_rq = qdesc;
8519
8520         /* Create Receive Queue for data */
8521         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8522                                       phba->sli4_hba.rq_esize,
8523                                       phba->sli4_hba.rq_ecount);
8524         if (!qdesc) {
8525                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8526                                 "0507 Failed allocate receive DRQ\n");
8527                 goto out_error;
8528         }
8529         phba->sli4_hba.dat_rq = qdesc;
8530
8531         if (phba->nvmet_support) {
8532                 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8533                         /* Create NVMET Receive Queue for header */
8534                         qdesc = lpfc_sli4_queue_alloc(phba,
8535                                                       LPFC_DEFAULT_PAGE_SIZE,
8536                                                       phba->sli4_hba.rq_esize,
8537                                                       LPFC_NVMET_RQE_DEF_COUNT);
8538                         if (!qdesc) {
8539                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8540                                                 "3146 Failed allocate "
8541                                                 "receive HRQ\n");
8542                                 goto out_error;
8543                         }
8544                         phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
8545
8546                         /* Only needed for header of RQ pair */
8547                         qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
8548                                               GFP_KERNEL);
8549                         if (qdesc->rqbp == NULL) {
8550                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8551                                                 "6131 Failed allocate "
8552                                                 "Header RQBP\n");
8553                                 goto out_error;
8554                         }
8555
8556                         /* Put list in known state in case driver load fails. */
8557                         INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
8558
8559                         /* Create NVMET Receive Queue for data */
8560                         qdesc = lpfc_sli4_queue_alloc(phba,
8561                                                       LPFC_DEFAULT_PAGE_SIZE,
8562                                                       phba->sli4_hba.rq_esize,
8563                                                       LPFC_NVMET_RQE_DEF_COUNT);
8564                         if (!qdesc) {
8565                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8566                                                 "3156 Failed allocate "
8567                                                 "receive DRQ\n");
8568                                 goto out_error;
8569                         }
8570                         phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
8571                 }
8572         }
8573
8574         /* Create the Queues needed for Flash Optimized Fabric operations */
8575         if (phba->cfg_fof)
8576                 lpfc_fof_queue_create(phba);
8577         return 0;
8578
8579 out_error:
8580         lpfc_sli4_queue_destroy(phba);
8581         return -ENOMEM;
8582 }
8583
8584 static inline void
8585 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
8586 {
8587         if (*qp != NULL) {
8588                 lpfc_sli4_queue_free(*qp);
8589                 *qp = NULL;
8590         }
8591 }
8592
8593 static inline void
8594 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
8595 {
8596         int idx;
8597
8598         if (*qs == NULL)
8599                 return;
8600
8601         for (idx = 0; idx < max; idx++)
8602                 __lpfc_sli4_release_queue(&(*qs)[idx]);
8603
8604         kfree(*qs);
8605         *qs = NULL;
8606 }
8607
8608 static inline void
8609 lpfc_sli4_release_queue_map(uint16_t **qmap)
8610 {
8611         if (*qmap != NULL) {
8612                 kfree(*qmap);
8613                 *qmap = NULL;
8614         }
8615 }
8616
8617 /**
8618  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
8619  * @phba: pointer to lpfc hba data structure.
8620  *
8621  * This routine is invoked to release all the SLI4 queues created for the
8622  * FCoE HBA operation.
8628  **/
8629 void
8630 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
8631 {
8632         if (phba->cfg_fof)
8633                 lpfc_fof_queue_destroy(phba);
8634
8635         /* Release HBA eqs */
8636         lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
8637
8638         /* Release FCP cqs */
8639         lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
8640                                  phba->cfg_fcp_io_channel);
8641
8642         /* Release FCP wqs */
8643         lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
8644                                  phba->cfg_fcp_io_channel);
8645
8646         /* Release FCP CQ mapping array */
8647         lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
8648
8649         /* Release NVME cqs */
8650         lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
8651                                         phba->cfg_nvme_io_channel);
8652
8653         /* Release NVME wqs */
8654         lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
8655                                         phba->cfg_nvme_io_channel);
8656
8657         /* Release NVME CQ mapping array */
8658         lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
8659
8660         if (phba->nvmet_support) {
8661                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
8662                                          phba->cfg_nvmet_mrq);
8663
8664                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
8665                                          phba->cfg_nvmet_mrq);
8666                 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
8667                                          phba->cfg_nvmet_mrq);
8668         }
8669
8670         /* Release mailbox command work queue */
8671         __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
8672
8673         /* Release ELS work queue */
8674         __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
8675
8676         /* Release NVME LS work queue */
8677         __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
8678
8679         /* Release unsolicited receive queue */
8680         __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
8681         __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
8682
8683         /* Release ELS complete queue */
8684         __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
8685
8686         /* Release NVME LS complete queue */
8687         __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
8688
8689         /* Release mailbox command complete queue */
8690         __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
8691
8692         /* Everything on this list has been freed */
8693         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
8694 }
8695
8696 int
8697 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
8698 {
8699         struct lpfc_rqb *rqbp;
8700         struct lpfc_dmabuf *h_buf;
8701         struct rqb_dmabuf *rqb_buffer;
8702
8703         rqbp = rq->rqbp;
8704         while (!list_empty(&rqbp->rqb_buffer_list)) {
8705                 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
8706                                  struct lpfc_dmabuf, list);
8707
8708                 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
8709                 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
8710                 rqbp->buffer_count--;
8711         }
8712         return 1;
8713 }
8714
8715 static int
8716 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
8717         struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
8718         int qidx, uint32_t qtype)
8719 {
8720         struct lpfc_sli_ring *pring;
8721         int rc;
8722
8723         if (!eq || !cq || !wq) {
8724                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8725                         "6085 Fast-path %s (%d) not allocated\n",
8726                         ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
8727                 return -ENOMEM;
8728         }
8729
8730         /* create the CQ first */
8731         rc = lpfc_cq_create(phba, cq, eq,
8732                         (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
8733         if (rc) {
8734                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8735                         "6086 Failed setup of CQ (%d), rc = 0x%x\n",
8736                         qidx, (uint32_t)rc);
8737                 return rc;
8738         }
8739         cq->chann = qidx;
8740
8741         if (qtype != LPFC_MBOX) {
8742                 /* Setup cq_map for fast lookup */
8743                 if (cq_map)
8744                         *cq_map = cq->queue_id;
8745
8746                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8747                         "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
8748                         qidx, cq->queue_id, qidx, eq->queue_id);
8749
8750                 /* create the wq */
8751                 rc = lpfc_wq_create(phba, wq, cq, qtype);
8752                 if (rc) {
8753                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8754                                 "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
8755                                 qidx, (uint32_t)rc);
8756                         /* no need to tear down cq - caller will do so */
8757                         return rc;
8758                 }
8759                 wq->chann = qidx;
8760
8761                 /* Bind this CQ/WQ to the NVME ring */
8762                 pring = wq->pring;
8763                 pring->sli.sli4.wqp = (void *)wq;
8764                 cq->pring = pring;
8765
8766                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8767                         "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
8768                         qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
8769         } else {
8770                 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
8771                 if (rc) {
8772                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8773                                 "0539 Failed setup of slow-path MQ: "
8774                                 "rc = 0x%x\n", rc);
8775                         /* no need to tear down cq - caller will do so */
8776                         return rc;
8777                 }
8778
8779                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8780                         "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
8781                         phba->sli4_hba.mbx_wq->queue_id,
8782                         phba->sli4_hba.mbx_cq->queue_id);
8783         }
8784
8785         return 0;
8786 }
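/*
 * Illustration only (not driver code): lpfc_create_wq_cq() always creates
 * the CQ before the WQ/MQ that posts completions to it, so a caller only
 * hands in the parent EQ and the pre-allocated queue structures, e.g.:
 *
 *	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
 *			       phba->sli4_hba.els_cq, phba->sli4_hba.els_wq,
 *			       NULL, 0, LPFC_ELS);
 *	if (rc)
 *		lpfc_sli4_queue_unset(phba);
 *
 * On WQ/MQ failure the helper leaves the CQ it created in place; tearing
 * it down is the caller's responsibility, as the comments above note.
 */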
8787
8788 /**
8789  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
8790  * @phba: pointer to lpfc hba data structure.
8791  *
8792  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
8793  * operation.
8794  *
8795  * Return codes
8796  *      0 - successful
8797  *      -ENOMEM - No available memory
8798  *      -EIO - The mailbox failed to complete successfully.
8799  **/
8800 int
8801 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8802 {
8803         uint32_t shdr_status, shdr_add_status;
8804         union lpfc_sli4_cfg_shdr *shdr;
8805         LPFC_MBOXQ_t *mboxq;
8806         int qidx;
8807         uint32_t length, io_channel;
8808         int rc = -ENOMEM;
8809
8810         /* Check for dual-ULP support */
8811         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8812         if (!mboxq) {
8813                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8814                                 "3249 Unable to allocate memory for "
8815                                 "QUERY_FW_CFG mailbox command\n");
8816                 return -ENOMEM;
8817         }
8818         length = (sizeof(struct lpfc_mbx_query_fw_config) -
8819                   sizeof(struct lpfc_sli4_cfg_mhdr));
8820         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8821                          LPFC_MBOX_OPCODE_QUERY_FW_CFG,
8822                          length, LPFC_SLI4_MBX_EMBED);
8823
8824         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8825
8826         shdr = (union lpfc_sli4_cfg_shdr *)
8827                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
8828         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8829         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
8830         if (shdr_status || shdr_add_status || rc) {
8831                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8832                                 "3250 QUERY_FW_CFG mailbox failed with status "
8833                                 "x%x add_status x%x, mbx status x%x\n",
8834                                 shdr_status, shdr_add_status, rc);
8835                 if (rc != MBX_TIMEOUT)
8836                         mempool_free(mboxq, phba->mbox_mem_pool);
8837                 rc = -ENXIO;
8838                 goto out_error;
8839         }
8840
8841         phba->sli4_hba.fw_func_mode =
8842                         mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
8843         phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
8844         phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
8845         phba->sli4_hba.physical_port =
8846                         mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
8847         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8848                         "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
8849                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
8850                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
8851
8852         if (rc != MBX_TIMEOUT)
8853                 mempool_free(mboxq, phba->mbox_mem_pool);
8854
8855         /*
8856          * Set up HBA Event Queues (EQs)
8857          */
8858         io_channel = phba->io_channel_irqs;
8859
8860         /* Set up HBA event queue */
8861         if (io_channel && !phba->sli4_hba.hba_eq) {
8862                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8863                                 "3147 Fast-path EQs not allocated\n");
8864                 rc = -ENOMEM;
8865                 goto out_error;
8866         }
8867         for (qidx = 0; qidx < io_channel; qidx++) {
8868                 if (!phba->sli4_hba.hba_eq[qidx]) {
8869                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8870                                         "0522 Fast-path EQ (%d) not "
8871                                         "allocated\n", qidx);
8872                         rc = -ENOMEM;
8873                         goto out_destroy;
8874                 }
8875                 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
8876                                                 phba->cfg_fcp_imax);
8877                 if (rc) {
8878                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8879                                         "0523 Failed setup of fast-path EQ "
8880                                         "(%d), rc = 0x%x\n", qidx,
8881                                         (uint32_t)rc);
8882                         goto out_destroy;
8883                 }
8884                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8885                                 "2584 HBA EQ setup: queue[%d]-id=%d\n",
8886                                 qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
8887         }
8888
8889         if (phba->cfg_nvme_io_channel) {
8890                 if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
8891                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8892                                 "6084 Fast-path NVME %s array not allocated\n",
8893                                 (phba->sli4_hba.nvme_cq) ? "WQ" : "CQ");
8894                         rc = -ENOMEM;
8895                         goto out_destroy;
8896                 }
8897
8898                 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
8899                         rc = lpfc_create_wq_cq(phba,
8900                                         phba->sli4_hba.hba_eq[
8901                                                 qidx % io_channel],
8902                                         phba->sli4_hba.nvme_cq[qidx],
8903                                         phba->sli4_hba.nvme_wq[qidx],
8904                                         &phba->sli4_hba.nvme_cq_map[qidx],
8905                                         qidx, LPFC_NVME);
8906                         if (rc) {
8907                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8908                                         "6123 Failed to setup fastpath "
8909                                         "NVME WQ/CQ (%d), rc = 0x%x\n",
8910                                         qidx, (uint32_t)rc);
8911                                 goto out_destroy;
8912                         }
8913                 }
8914         }
8915
8916         if (phba->cfg_fcp_io_channel) {
8917                 /* Set up fast-path FCP Response Complete Queue */
8918                 if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
8919                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8920                                 "3148 Fast-path FCP %s array not allocated\n",
8921                                 phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
8922                         rc = -ENOMEM;
8923                         goto out_destroy;
8924                 }
8925
8926                 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
8927                         rc = lpfc_create_wq_cq(phba,
8928                                         phba->sli4_hba.hba_eq[
8929                                                 qidx % io_channel],
8930                                         phba->sli4_hba.fcp_cq[qidx],
8931                                         phba->sli4_hba.fcp_wq[qidx],
8932                                         &phba->sli4_hba.fcp_cq_map[qidx],
8933                                         qidx, LPFC_FCP);
8934                         if (rc) {
8935                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8936                                         "0535 Failed to setup fastpath "
8937                                         "FCP WQ/CQ (%d), rc = 0x%x\n",
8938                                         qidx, (uint32_t)rc);
8939                                 goto out_destroy;
8940                         }
8941                 }
8942         }
8943
8944         /*
8945          * Set up Slow Path Complete Queues (CQs)
8946          */
8947
8948         /* Set up slow-path MBOX CQ/MQ */
8949
8950         if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
8951                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8952                                 "0528 %s not allocated\n",
8953                                 phba->sli4_hba.mbx_cq ?
8954                                 "Mailbox WQ" : "Mailbox CQ");
8955                 rc = -ENOMEM;
8956                 goto out_destroy;
8957         }
8958
8959         rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8960                                phba->sli4_hba.mbx_cq,
8961                                phba->sli4_hba.mbx_wq,
8962                                NULL, 0, LPFC_MBOX);
8963         if (rc) {
8964                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8965                         "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
8966                         (uint32_t)rc);
8967                 goto out_destroy;
8968         }
8969         if (phba->nvmet_support) {
8970                 if (!phba->sli4_hba.nvmet_cqset) {
8971                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8972                                         "3165 Fast-path NVME CQ Set "
8973                                         "array not allocated\n");
8974                         rc = -ENOMEM;
8975                         goto out_destroy;
8976                 }
8977                 if (phba->cfg_nvmet_mrq > 1) {
8978                         rc = lpfc_cq_create_set(phba,
8979                                         phba->sli4_hba.nvmet_cqset,
8980                                         phba->sli4_hba.hba_eq,
8981                                         LPFC_WCQ, LPFC_NVMET);
8982                         if (rc) {
8983                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8984                                                 "3164 Failed setup of NVME CQ "
8985                                                 "Set, rc = 0x%x\n",
8986                                                 (uint32_t)rc);
8987                                 goto out_destroy;
8988                         }
8989                 } else {
8990                         /* Set up NVMET Receive Complete Queue */
8991                         rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
8992                                             phba->sli4_hba.hba_eq[0],
8993                                             LPFC_WCQ, LPFC_NVMET);
8994                         if (rc) {
8995                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8996                                                 "6089 Failed setup NVMET CQ: "
8997                                                 "rc = 0x%x\n", (uint32_t)rc);
8998                                 goto out_destroy;
8999                         }
9000                         phba->sli4_hba.nvmet_cqset[0]->chann = 0;
9001
9002                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9003                                         "6090 NVMET CQ setup: cq-id=%d, "
9004                                         "parent eq-id=%d\n",
9005                                         phba->sli4_hba.nvmet_cqset[0]->queue_id,
9006                                         phba->sli4_hba.hba_eq[0]->queue_id);
9007                 }
9008         }
9009
9010         /* Set up slow-path ELS WQ/CQ */
9011         if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
9012                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9013                                 "0530 ELS %s not allocated\n",
9014                                 phba->sli4_hba.els_cq ? "WQ" : "CQ");
9015                 rc = -ENOMEM;
9016                 goto out_destroy;
9017         }
9018         rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
9019                                         phba->sli4_hba.els_cq,
9020                                         phba->sli4_hba.els_wq,
9021                                         NULL, 0, LPFC_ELS);
9022         if (rc) {
9023                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9024                         "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9025                         (uint32_t)rc);
9026                 goto out_destroy;
9027         }
9028         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9029                         "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
9030                         phba->sli4_hba.els_wq->queue_id,
9031                         phba->sli4_hba.els_cq->queue_id);
9032
9033         if (phba->cfg_nvme_io_channel) {
9034                 /* Set up NVME LS Complete Queue */
9035                 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
9036                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9037                                         "6091 NVME LS %s not allocated\n",
9038                                         phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
9039                         rc = -ENOMEM;
9040                         goto out_destroy;
9041                 }
9042                 rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
9043                                         phba->sli4_hba.nvmels_cq,
9044                                         phba->sli4_hba.nvmels_wq,
9045                                         NULL, 0, LPFC_NVME_LS);
9046                 if (rc) {
9047                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9048                                 "0529 Failed setup of NVME LS WQ/CQ: "
9049                                 "rc = 0x%x\n", (uint32_t)rc);
9050                         goto out_destroy;
9051                 }
9052
9053                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9054                                 "6096 NVME LS WQ setup: wq-id=%d, "
9055                                 "parent cq-id=%d\n",
9056                                 phba->sli4_hba.nvmels_wq->queue_id,
9057                                 phba->sli4_hba.nvmels_cq->queue_id);
9058         }
9059
9060         /*
9061          * Create NVMET Receive Queue (RQ)
9062          */
9063         if (phba->nvmet_support) {
9064                 if ((!phba->sli4_hba.nvmet_cqset) ||
9065                     (!phba->sli4_hba.nvmet_mrq_hdr) ||
9066                     (!phba->sli4_hba.nvmet_mrq_data)) {
9067                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9068                                         "6130 MRQ CQ Queues not "
9069                                         "allocated\n");
9070                         rc = -ENOMEM;
9071                         goto out_destroy;
9072                 }
9073                 if (phba->cfg_nvmet_mrq > 1) {
9074                         rc = lpfc_mrq_create(phba,
9075                                              phba->sli4_hba.nvmet_mrq_hdr,
9076                                              phba->sli4_hba.nvmet_mrq_data,
9077                                              phba->sli4_hba.nvmet_cqset,
9078                                              LPFC_NVMET);
9079                         if (rc) {
9080                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9081                                                 "6098 Failed setup of NVMET "
9082                                                 "MRQ: rc = 0x%x\n",
9083                                                 (uint32_t)rc);
9084                                 goto out_destroy;
9085                         }
9086
9087                 } else {
9088                         rc = lpfc_rq_create(phba,
9089                                             phba->sli4_hba.nvmet_mrq_hdr[0],
9090                                             phba->sli4_hba.nvmet_mrq_data[0],
9091                                             phba->sli4_hba.nvmet_cqset[0],
9092                                             LPFC_NVMET);
9093                         if (rc) {
9094                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9095                                                 "6057 Failed setup of NVMET "
9096                                                 "Receive Queue: rc = 0x%x\n",
9097                                                 (uint32_t)rc);
9098                                 goto out_destroy;
9099                         }
9100
9101                         lpfc_printf_log(
9102                                 phba, KERN_INFO, LOG_INIT,
9103                                 "6099 NVMET RQ setup: hdr-rq-id=%d, "
9104                                 "dat-rq-id=%d parent cq-id=%d\n",
9105                                 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
9106                                 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
9107                                 phba->sli4_hba.nvmet_cqset[0]->queue_id);
9108
9109                 }
9110         }
9111
9112         if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9113                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9114                                 "0540 Receive Queue not allocated\n");
9115                 rc = -ENOMEM;
9116                 goto out_destroy;
9117         }
9118
9119         rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
9120                             phba->sli4_hba.els_cq, LPFC_USOL);
9121         if (rc) {
9122                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9123                                 "0541 Failed setup of Receive Queue: "
9124                                 "rc = 0x%x\n", (uint32_t)rc);
9125                 goto out_destroy;
9126         }
9127
9128         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9129                         "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
9130                         "parent cq-id=%d\n",
9131                         phba->sli4_hba.hdr_rq->queue_id,
9132                         phba->sli4_hba.dat_rq->queue_id,
9133                         phba->sli4_hba.els_cq->queue_id);
9134
9135         if (phba->cfg_fof) {
9136                 rc = lpfc_fof_queue_setup(phba);
9137                 if (rc) {
9138                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9139                                         "0549 Failed setup of FOF Queues: "
9140                                         "rc = 0x%x\n", rc);
9141                         goto out_destroy;
9142                 }
9143         }
9144
9145         for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9146                 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9147                                          phba->cfg_fcp_imax);
9148
9149         return 0;
9150
9151 out_destroy:
9152         lpfc_sli4_queue_unset(phba);
9153 out_error:
9154         return rc;
9155 }
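/*
 * Illustration only (hypothetical helper, not driver API): when there are
 * fewer hardware EQs (io_channel_irqs) than configured NVME/FCP channels,
 * the setup loops above fold the channels onto the EQs round-robin via
 * "qidx % io_channel". The same mapping, expressed as a helper:
 */
static inline struct lpfc_queue *
lpfc_fast_path_parent_eq(struct lpfc_hba *phba, int qidx)
{
	/* Sketch only: mirrors the modulo mapping used when binding
	 * fast-path CQs to their parent EQs in lpfc_sli4_queue_setup().
	 */
	return phba->sli4_hba.hba_eq[qidx % phba->io_channel_irqs];
}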
9156
9157 /**
9158  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
9159  * @phba: pointer to lpfc hba data structure.
9160  *
9161  * This routine is invoked to unset (destroy) all the SLI4 queues created
9162  * for the FCoE HBA operation. Queues that were never allocated are simply
9163  * skipped, so the routine has no failure modes and returns void.
9164  **/
9169 void
9170 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
9171 {
9172         int qidx;
9173
9174         /* Unset the queues created for Flash Optimized Fabric operations */
9175         if (phba->cfg_fof)
9176                 lpfc_fof_queue_destroy(phba);
9177
9178         /* Unset mailbox command work queue */
9179         if (phba->sli4_hba.mbx_wq)
9180                 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
9181
9182         /* Unset NVME LS work queue */
9183         if (phba->sli4_hba.nvmels_wq)
9184                 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
9185
9186         /* Unset ELS work queue */
9187         if (phba->sli4_hba.els_wq)
9188                 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
9189
9190         /* Unset unsolicited receive queue */
9191         if (phba->sli4_hba.hdr_rq)
9192                 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
9193                                 phba->sli4_hba.dat_rq);
9194
9195         /* Unset FCP work queue */
9196         if (phba->sli4_hba.fcp_wq)
9197                 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
9198                         lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
9199
9200         /* Unset NVME work queue */
9201         if (phba->sli4_hba.nvme_wq) {
9202                 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
9203                         lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
9204         }
9205
9206         /* Unset mailbox command complete queue */
9207         if (phba->sli4_hba.mbx_cq)
9208                 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
9209
9210         /* Unset ELS complete queue */
9211         if (phba->sli4_hba.els_cq)
9212                 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
9213
9214         /* Unset NVME LS complete queue */
9215         if (phba->sli4_hba.nvmels_cq)
9216                 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9217
9218         /* Unset NVME response complete queue */
9219         if (phba->sli4_hba.nvme_cq)
9220                 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
9221                         lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
9222
9223         if (phba->nvmet_support) {
9224                 /* Unset NVMET MRQ queue */
9225                 if (phba->sli4_hba.nvmet_mrq_hdr) {
9226                         for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9227                                 lpfc_rq_destroy(
9228                                         phba,
9229                                         phba->sli4_hba.nvmet_mrq_hdr[qidx],
9230                                         phba->sli4_hba.nvmet_mrq_data[qidx]);
9231                 }
9232
9233                 /* Unset NVMET CQ Set complete queue */
9234                 if (phba->sli4_hba.nvmet_cqset) {
9235                         for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9236                                 lpfc_cq_destroy(
9237                                         phba, phba->sli4_hba.nvmet_cqset[qidx]);
9238                 }
9239         }
9240
9241         /* Unset FCP response complete queue */
9242         if (phba->sli4_hba.fcp_cq)
9243                 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
9244                         lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
9245
9246         /* Unset fast-path event queue */
9247         if (phba->sli4_hba.hba_eq)
9248                 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
9249                         lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
9250 }
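/*
 * Note: the teardown above mirrors lpfc_sli4_queue_setup() in reverse
 * dependency order - work and receive queues first, then their completion
 * queues, and the parent event queues last - so no CQ or EQ is destroyed
 * while a child queue could still post to it.
 */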
9251
9252 /**
9253  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
9254  * @phba: pointer to lpfc hba data structure.
9255  *
9256  * This routine is invoked to allocate and set up a pool of completion queue
9257  * events. The body of the completion queue event is a completion queue entry
9258  * events. The body of a completion queue event is a completion queue entry
9259  * (CQE). For now, this pool is used by the interrupt service routine to queue
9260  *   - Mailbox asynchronous events
9261  *   - Receive queue completion unsolicited events
9262  * Later, this can be used for all the slow-path events.
9263  *
9264  * Return codes
9265  *      0 - successful
9266  *      -ENOMEM - No available memory
9267  **/
9268 static int
9269 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
9270 {
9271         struct lpfc_cq_event *cq_event;
9272         int i;
9273
9274         for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
9275                 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
9276                 if (!cq_event)
9277                         goto out_pool_create_fail;
9278                 list_add_tail(&cq_event->list,
9279                               &phba->sli4_hba.sp_cqe_event_pool);
9280         }
9281         return 0;
9282
9283 out_pool_create_fail:
9284         lpfc_sli4_cq_event_pool_destroy(phba);
9285         return -ENOMEM;
9286 }
9287
9288 /**
9289  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
9290  * @phba: pointer to lpfc hba data structure.
9291  *
9292  * This routine is invoked to free the pool of completion queue events at
9293  * driver unload time. Note that it is the responsibility of the driver
9294  * cleanup routine to free all the outstanding completion-queue events
9295  * allocated from this pool back into the pool before invoking this routine
9296  * to destroy the pool.
9297  **/
9298 static void
9299 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
9300 {
9301         struct lpfc_cq_event *cq_event, *next_cq_event;
9302
9303         list_for_each_entry_safe(cq_event, next_cq_event,
9304                                  &phba->sli4_hba.sp_cqe_event_pool, list) {
9305                 list_del(&cq_event->list);
9306                 kfree(cq_event);
9307         }
9308 }
9309
9310 /**
9311  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9312  * @phba: pointer to lpfc hba data structure.
9313  *
9314  * This routine is the lock free version of the API invoked to allocate a
9315  * completion-queue event from the free pool.
9316  *
9317  * Return: Pointer to the newly allocated completion-queue event if successful
9318  *         NULL otherwise.
9319  **/
9320 struct lpfc_cq_event *
9321 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9322 {
9323         struct lpfc_cq_event *cq_event = NULL;
9324
9325         list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
9326                          struct lpfc_cq_event, list);
9327         return cq_event;
9328 }
9329
9330 /**
9331  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
9332  * @phba: pointer to lpfc hba data structure.
9333  *
9334  * This routine is the lock version of the API invoked to allocate a
9335  * completion-queue event from the free pool.
9336  *
9337  * Return: Pointer to the newly allocated completion-queue event if successful
9338  *         NULL otherwise.
9339  **/
9340 struct lpfc_cq_event *
9341 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
9342 {
9343         struct lpfc_cq_event *cq_event;
9344         unsigned long iflags;
9345
9346         spin_lock_irqsave(&phba->hbalock, iflags);
9347         cq_event = __lpfc_sli4_cq_event_alloc(phba);
9348         spin_unlock_irqrestore(&phba->hbalock, iflags);
9349         return cq_event;
9350 }
9351
9352 /**
9353  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9354  * @phba: pointer to lpfc hba data structure.
9355  * @cq_event: pointer to the completion queue event to be freed.
9356  *
9357  * This routine is the lock free version of the API invoked to release a
9358  * completion-queue event back into the free pool.
9359  **/
9360 void
9361 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9362                              struct lpfc_cq_event *cq_event)
9363 {
9364         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
9365 }
9366
9367 /**
9368  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
9369  * @phba: pointer to lpfc hba data structure.
9370  * @cq_event: pointer to the completion queue event to be freed.
9371  *
9372  * This routine is the lock version of the API invoked to release a
9373  * completion-queue event back into the free pool.
9374  **/
9375 void
9376 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
9377                            struct lpfc_cq_event *cq_event)
9378 {
9379         unsigned long iflags;
9380         spin_lock_irqsave(&phba->hbalock, iflags);
9381         __lpfc_sli4_cq_event_release(phba, cq_event);
9382         spin_unlock_irqrestore(&phba->hbalock, iflags);
9383 }
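/*
 * Illustration only: a typical producer/consumer pairing for this pool.
 * The interrupt path, which already holds hbalock, uses the lock-free
 * variant; the worker thread returns events with the locking variant.
 * "pending_list" below is a stand-in for the driver's pending-event
 * lists (see lpfc_sli4_cq_event_release_all()):
 *
 *	// ISR context, hbalock held:
 *	cq_event = __lpfc_sli4_cq_event_alloc(phba);
 *	if (cq_event)
 *		list_add_tail(&cq_event->list, &pending_list);
 *
 *	// Worker thread, after processing the event:
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 */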
9384
9385 /**
9386  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
9387  * @phba: pointer to lpfc hba data structure.
9388  *
9389  * This routine frees all the pending completion-queue events back into
9390  * the free pool for device reset.
9391  **/
9392 static void
9393 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
9394 {
9395         LIST_HEAD(cqelist);
9396         struct lpfc_cq_event *cqe;
9397         unsigned long iflags;
9398
9399         /* Retrieve all the pending WCQEs from pending WCQE lists */
9400         spin_lock_irqsave(&phba->hbalock, iflags);
9401         /* Pending FCP XRI abort events */
9402         list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
9403                          &cqelist);
9404         /* Pending ELS XRI abort events */
9405         list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
9406                          &cqelist);
9407         /* Pending async events */
9408         list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
9409                          &cqelist);
9410         spin_unlock_irqrestore(&phba->hbalock, iflags);
9411
9412         while (!list_empty(&cqelist)) {
9413                 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
9414                 lpfc_sli4_cq_event_release(phba, cqe);
9415         }
9416 }
9417
9418 /**
9419  * lpfc_pci_function_reset - Reset pci function.
9420  * @phba: pointer to lpfc hba data structure.
9421  *
9422  * This routine is invoked to request a PCI function reset. It will destroy
9423  * all resources assigned to the PCI function that originated this request.
9424  *
9425  * Return codes
9426  *      0 - successful
9427  *      -ENOMEM - No available memory
9428  *      -EIO - The mailbox failed to complete successfully.
9429  **/
9430 int
9431 lpfc_pci_function_reset(struct lpfc_hba *phba)
9432 {
9433         LPFC_MBOXQ_t *mboxq;
9434         uint32_t rc = 0, if_type;
9435         uint32_t shdr_status, shdr_add_status;
9436         uint32_t rdy_chk;
9437         uint32_t port_reset = 0;
9438         union lpfc_sli4_cfg_shdr *shdr;
9439         struct lpfc_register reg_data;
9440         uint16_t devid;
9441
9442         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9443         switch (if_type) {
9444         case LPFC_SLI_INTF_IF_TYPE_0:
9445                 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
9446                                                        GFP_KERNEL);
9447                 if (!mboxq) {
9448                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9449                                         "0494 Unable to allocate memory for "
9450                                         "issuing SLI_FUNCTION_RESET mailbox "
9451                                         "command\n");
9452                         return -ENOMEM;
9453                 }
9454
9455                 /* Setup PCI function reset mailbox-ioctl command */
9456                 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9457                                  LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
9458                                  LPFC_SLI4_MBX_EMBED);
9459                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9460                 shdr = (union lpfc_sli4_cfg_shdr *)
9461                         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
9462                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9463                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
9464                                          &shdr->response);
9465                 if (rc != MBX_TIMEOUT)
9466                         mempool_free(mboxq, phba->mbox_mem_pool);
9467                 if (shdr_status || shdr_add_status || rc) {
9468                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9469                                         "0495 SLI_FUNCTION_RESET mailbox "
9470                                         "failed with status x%x add_status x%x,"
9471                                         " mbx status x%x\n",
9472                                         shdr_status, shdr_add_status, rc);
9473                         rc = -ENXIO;
9474                 }
9475                 break;
9476         case LPFC_SLI_INTF_IF_TYPE_2:
9477         case LPFC_SLI_INTF_IF_TYPE_6:
9478 wait:
9479                 /*
9480                  * Poll the Port Status Register and wait for RDY for
9481                  * up to 30 seconds. If the port doesn't respond, treat
9482                  * it as an error.
9483                  */
9484                 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
9485                         if (lpfc_readl(phba->sli4_hba.u.if_type2.
9486                                 STATUSregaddr, &reg_data.word0)) {
9487                                 rc = -ENODEV;
9488                                 goto out;
9489                         }
9490                         if (bf_get(lpfc_sliport_status_rdy, &reg_data))
9491                                 break;
9492                         msleep(20);
9493                 }
9494
9495                 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
9496                         phba->work_status[0] = readl(
9497                                 phba->sli4_hba.u.if_type2.ERR1regaddr);
9498                         phba->work_status[1] = readl(
9499                                 phba->sli4_hba.u.if_type2.ERR2regaddr);
9500                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9501                                         "2890 Port not ready, port status reg "
9502                                         "0x%x error 1=0x%x, error 2=0x%x\n",
9503                                         reg_data.word0,
9504                                         phba->work_status[0],
9505                                         phba->work_status[1]);
9506                         rc = -ENODEV;
9507                         goto out;
9508                 }
9509
9510                 if (!port_reset) {
9511                         /*
9512                          * Reset the port now
9513                          */
9514                         reg_data.word0 = 0;
9515                         bf_set(lpfc_sliport_ctrl_end, &reg_data,
9516                                LPFC_SLIPORT_LITTLE_ENDIAN);
9517                         bf_set(lpfc_sliport_ctrl_ip, &reg_data,
9518                                LPFC_SLIPORT_INIT_PORT);
9519                         writel(reg_data.word0, phba->sli4_hba.u.if_type2.
9520                                CTRLregaddr);
9521                         /* flush */
9522                         pci_read_config_word(phba->pcidev,
9523                                              PCI_DEVICE_ID, &devid);
9524
9525                         port_reset = 1;
9526                         msleep(20);
9527                         goto wait;
9528                 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
9529                         rc = -ENODEV;
9530                         goto out;
9531                 }
9532                 break;
9533
9534         case LPFC_SLI_INTF_IF_TYPE_1:
9535         default:
9536                 break;
9537         }
9538
9539 out:
9540         /* Catch the not-ready port failure after a port reset. */
9541         if (rc) {
9542                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9543                                 "3317 HBA not functional: IP Reset Failed; "
9544                                 "try: echo fw_reset > board_mode\n");
9545                 rc = -ENODEV;
9546         }
9547
9548         return rc;
9549 }
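/*
 * Illustration only (hypothetical helper, not driver API): the RDY polling
 * in lpfc_pci_function_reset() above is a bounded busy-wait of up to
 * 1500 iterations x 20 ms (~30 seconds). The same contract as a helper:
 */
static inline int
lpfc_wait_sliport_rdy(struct lpfc_hba *phba, struct lpfc_register *reg_data)
{
	uint32_t rdy_chk;

	for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data->word0))
			return -ENODEV;		/* register read failed */
		if (bf_get(lpfc_sliport_status_rdy, reg_data))
			return 0;		/* port reports ready */
		msleep(20);
	}
	return -ETIMEDOUT;			/* port never came ready */
}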
9550
9551 /**
9552  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
9553  * @phba: pointer to lpfc hba data structure.
9554  *
9555  * This routine is invoked to set up the PCI device memory space for device
9556  * with SLI-4 interface spec.
9557  *
9558  * Return codes
9559  *      0 - successful
9560  *      other values - error
9561  **/
9562 static int
9563 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
9564 {
9565         struct pci_dev *pdev;
9566         unsigned long bar0map_len, bar1map_len, bar2map_len;
9567         int error = -ENODEV;
9568         uint32_t if_type;
9569
9570         /* Obtain PCI device reference */
9571         if (!phba->pcidev)
9572                 return error;
9573
9574         pdev = phba->pcidev;
9575
9576         /* Set the device DMA mask size */
9577         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
9578             pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
9579                 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
9580                     pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
9581                         return error;
9582                 }
9583         }
9584
9585         /*
9586          * The BARs and register set definitions and offset locations are
9587          * dependent on the if_type.
9588          */
9589         if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
9590                                   &phba->sli4_hba.sli_intf.word0)) {
9591                 return error;
9592         }
9593
9594         /* There is no SLI3 failback for SLI4 devices. */
9595         if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
9596             LPFC_SLI_INTF_VALID) {
9597                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9598                                 "2894 SLI_INTF reg contents invalid "
9599                                 "sli_intf reg 0x%x\n",
9600                                 phba->sli4_hba.sli_intf.word0);
9601                 return error;
9602         }
9603
9604         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9605         /*
9606          * Get the bus address of SLI4 device BAR regions and the
9607          * number of bytes required by each mapping. The mapping of the
9608          * particular PCI BAR regions is dependent on the type of
9609          * SLI4 device.
9610          */
9611         if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
9612                 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
9613                 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
9614
9615                 /*
9616                  * Map SLI4 PCI Config Space Register base to a kernel virtual
9617                  * addr
9618                  */
9619                 phba->sli4_hba.conf_regs_memmap_p =
9620                         ioremap(phba->pci_bar0_map, bar0map_len);
9621                 if (!phba->sli4_hba.conf_regs_memmap_p) {
9622                         dev_printk(KERN_ERR, &pdev->dev,
9623                                    "ioremap failed for SLI4 PCI config "
9624                                    "registers.\n");
9625                         goto out;
9626                 }
9627                 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
9628                 /* Set up BAR0 PCI config space register memory map */
9629                 lpfc_sli4_bar0_register_memmap(phba, if_type);
9630         } else {
9631                 phba->pci_bar0_map = pci_resource_start(pdev, 1);
9632                 bar0map_len = pci_resource_len(pdev, 1);
9633                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9634                         dev_printk(KERN_ERR, &pdev->dev,
9635                            "FATAL - No BAR0 mapping for SLI4, if_type >= 2\n");
9636                         goto out;
9637                 }
9638                 phba->sli4_hba.conf_regs_memmap_p =
9639                                 ioremap(phba->pci_bar0_map, bar0map_len);
9640                 if (!phba->sli4_hba.conf_regs_memmap_p) {
9641                         dev_printk(KERN_ERR, &pdev->dev,
9642                                 "ioremap failed for SLI4 PCI config "
9643                                 "registers.\n");
9644                         goto out;
9645                 }
9646                 lpfc_sli4_bar0_register_memmap(phba, if_type);
9647         }
9648
9649         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
9650                 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
9651                         /*
9652                          * Map SLI4 if type 0 HBA Control Register base to a
9653                          * kernel virtual address and setup the registers.
9654                          */
9655                         phba->pci_bar1_map = pci_resource_start(pdev,
9656                                                                 PCI_64BIT_BAR2);
9657                         bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
9658                         phba->sli4_hba.ctrl_regs_memmap_p =
9659                                         ioremap(phba->pci_bar1_map,
9660                                                 bar1map_len);
9661                         if (!phba->sli4_hba.ctrl_regs_memmap_p) {
9662                                 dev_err(&pdev->dev,
9663                                            "ioremap failed for SLI4 HBA "
9664                                             "control registers.\n");
9665                                 error = -ENOMEM;
9666                                 goto out_iounmap_conf;
9667                         }
9668                         phba->pci_bar2_memmap_p =
9669                                          phba->sli4_hba.ctrl_regs_memmap_p;
9670                         lpfc_sli4_bar1_register_memmap(phba, if_type);
9671                 } else {
9672                         error = -ENOMEM;
9673                         goto out_iounmap_conf;
9674                 }
9675         }
9676
9677         if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
9678             (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
9679                 /*
9680                  * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
9681                  * virtual address and setup the registers.
9682                  */
9683                 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
9684                 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
9685                 phba->sli4_hba.drbl_regs_memmap_p =
9686                                 ioremap(phba->pci_bar1_map, bar1map_len);
9687                 if (!phba->sli4_hba.drbl_regs_memmap_p) {
9688                         dev_err(&pdev->dev,
9689                            "ioremap failed for SLI4 HBA doorbell registers.\n");
9690                         goto out_iounmap_conf;
9691                 }
9692                 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
9693                 lpfc_sli4_bar1_register_memmap(phba, if_type);
9694         }
9695
9696         if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
9697                 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
9698                         /*
9699                          * Map SLI4 if type 0 HBA Doorbell Register base to
9700                          * a kernel virtual address and setup the registers.
9701                          */
9702                         phba->pci_bar2_map = pci_resource_start(pdev,
9703                                                                 PCI_64BIT_BAR4);
9704                         bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
9705                         phba->sli4_hba.drbl_regs_memmap_p =
9706                                         ioremap(phba->pci_bar2_map,
9707                                                 bar2map_len);
9708                         if (!phba->sli4_hba.drbl_regs_memmap_p) {
9709                                 dev_err(&pdev->dev,
9710                                            "ioremap failed for SLI4 HBA"
9711                                            " doorbell registers.\n");
9712                                 error = -ENOMEM;
9713                                 goto out_iounmap_ctrl;
9714                         }
9715                         phba->pci_bar4_memmap_p =
9716                                         phba->sli4_hba.drbl_regs_memmap_p;
9717                         error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
9718                         if (error)
9719                                 goto out_iounmap_all;
9720                 } else {
9721                         error = -ENOMEM;
9722                         goto out_iounmap_all;
9723                 }
9724         }
9725
9726         if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
9727             pci_resource_start(pdev, PCI_64BIT_BAR4)) {
9728                 /*
9729                  * Map SLI4 if type 6 HBA DPP Register base to a kernel
9730                  * virtual address and setup the registers.
9731                  */
9732                 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
9733                 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
9734                 phba->sli4_hba.dpp_regs_memmap_p =
9735                                 ioremap(phba->pci_bar2_map, bar2map_len);
9736                 if (!phba->sli4_hba.dpp_regs_memmap_p) {
9737                         dev_err(&pdev->dev,
9738                            "ioremap failed for SLI4 HBA dpp registers.\n");
9739                         goto out_iounmap_ctrl;
9740                 }
9741                 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
9742         }
9743
9744         /* Set up the EQ/CQ register handling functions now */
9745         switch (if_type) {
9746         case LPFC_SLI_INTF_IF_TYPE_0:
9747         case LPFC_SLI_INTF_IF_TYPE_2:
9748                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
9749                 phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
9750                 phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
9751                 break;
9752         case LPFC_SLI_INTF_IF_TYPE_6:
9753                 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
9754                 phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
9755                 phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
9756                 break;
9757         default:
9758                 break;
9759         }
9760
9761         return 0;
9762
9763 out_iounmap_all:
9764         iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9765 out_iounmap_ctrl:
9766         iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
9767 out_iounmap_conf:
9768         iounmap(phba->sli4_hba.conf_regs_memmap_p);
9769 out:
9770         return error;
9771 }
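/*
 * Illustration only: the 64-then-32 bit DMA mask fallback at the top of
 * lpfc_sli4_pci_mem_setup() can equivalently be written with the combined
 * helper from <linux/dma-mapping.h>:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return error;
 *
 * Both forms leave the device limited to 32-bit DMA when the platform
 * cannot satisfy a 64-bit mask.
 */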
9772
9773 /**
9774  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
9775  * @phba: pointer to lpfc hba data structure.
9776  *
9777  * This routine is invoked to unset the PCI device memory space for device
9778  * with SLI-4 interface spec.
9779  **/
9780 static void
9781 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
9782 {
9783         uint32_t if_type;
9784         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9785
9786         switch (if_type) {
9787         case LPFC_SLI_INTF_IF_TYPE_0:
9788                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9789                 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
9790                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9791                 break;
9792         case LPFC_SLI_INTF_IF_TYPE_2:
9793                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9794                 break;
9795         case LPFC_SLI_INTF_IF_TYPE_6:
9796                 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
9797                 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9798                 break;
9799         case LPFC_SLI_INTF_IF_TYPE_1:
9800         default:
9801                 dev_printk(KERN_ERR, &phba->pcidev->dev,
9802                            "FATAL - unsupported SLI4 interface type - %d\n",
9803                            if_type);
9804                 break;
9805         }
9806 }
9807
9808 /**
9809  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
9810  * @phba: pointer to lpfc hba data structure.
9811  *
9812  * This routine is invoked to enable the MSI-X interrupt vectors to device
9813  * with SLI-3 interface specs.
9814  *
9815  * Return codes
9816  *   0 - successful
9817  *   other values - error
9818  **/
9819 static int
9820 lpfc_sli_enable_msix(struct lpfc_hba *phba)
9821 {
9822         int rc;
9823         LPFC_MBOXQ_t *pmb;
9824
9825         /* Set up MSI-X multi-message vectors */
9826         rc = pci_alloc_irq_vectors(phba->pcidev,
9827                         LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
9828         if (rc < 0) {
9829                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9830                                 "0420 PCI enable MSI-X failed (%d)\n", rc);
9831                 goto vec_fail_out;
9832         }
9833
9834         /*
9835          * Assign MSI-X vectors to interrupt handlers
9836          */
9837
9838         /* vector-0 is associated with the slow-path handler */
9839         rc = request_irq(pci_irq_vector(phba->pcidev, 0),
9840                          &lpfc_sli_sp_intr_handler, 0,
9841                          LPFC_SP_DRIVER_HANDLER_NAME, phba);
9842         if (rc) {
9843                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9844                                 "0421 MSI-X slow-path request_irq failed "
9845                                 "(%d)\n", rc);
9846                 goto msi_fail_out;
9847         }
9848
9849         /* vector-1 is associated with the fast-path handler */
9850         rc = request_irq(pci_irq_vector(phba->pcidev, 1),
9851                          &lpfc_sli_fp_intr_handler, 0,
9852                          LPFC_FP_DRIVER_HANDLER_NAME, phba);
9853
9854         if (rc) {
9855                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9856                                 "0429 MSI-X fast-path request_irq failed "
9857                                 "(%d)\n", rc);
9858                 goto irq_fail_out;
9859         }
9860
9861         /*
9862          * Configure HBA MSI-X attention conditions to messages
9863          */
9864         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9865
9866         if (!pmb) {
9867                 rc = -ENOMEM;
9868                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9869                                 "0474 Unable to allocate memory for issuing "
9870                                 "MBOX_CONFIG_MSI command\n");
9871                 goto mem_fail_out;
9872         }
9873         rc = lpfc_config_msi(phba, pmb);
9874         if (rc)
9875                 goto mbx_fail_out;
9876         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9877         if (rc != MBX_SUCCESS) {
9878                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
9879                                 "0351 Config MSI mailbox command failed, "
9880                                 "mbxCmd x%x, mbxStatus x%x\n",
9881                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
9882                 goto mbx_fail_out;
9883         }
9884
9885         /* Free memory allocated for mailbox command */
9886         mempool_free(pmb, phba->mbox_mem_pool);
9887         return rc;
9888
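/*
 * Error unwind: the labels below intentionally fall through, so a failure
 * at any stage also releases everything acquired before it - mailbox
 * memory, then the fast-path IRQ, the slow-path IRQ, and finally the
 * MSI-X vectors.
 */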
9889 mbx_fail_out:
9890         /* Free memory allocated for mailbox command */
9891         mempool_free(pmb, phba->mbox_mem_pool);
9892
9893 mem_fail_out:
9894         /* free the irq already requested */
9895         free_irq(pci_irq_vector(phba->pcidev, 1), phba);
9896
9897 irq_fail_out:
9898         /* free the irq already requested */
9899         free_irq(pci_irq_vector(phba->pcidev, 0), phba);
9900
9901 msi_fail_out:
9902         /* Unconfigure MSI-X capability structure */
9903         pci_free_irq_vectors(phba->pcidev);
9904
9905 vec_fail_out:
9906         return rc;
9907 }
9908
9909 /**
9910  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
9911  * @phba: pointer to lpfc hba data structure.
9912  *
9913  * This routine is invoked to enable the MSI interrupt mode to device with
9914  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
9915  * enable the MSI vector. The device driver is responsible for calling
9916  * request_irq() to register the MSI vector with an interrupt handler,
9917  * which is done in this function.
9918  *
9919  * Return codes
9920  *      0 - successful
9921  *      other values - error
9922  **/
9923 static int
9924 lpfc_sli_enable_msi(struct lpfc_hba *phba)
9925 {
9926         int rc;
9927
9928         rc = pci_enable_msi(phba->pcidev);
9929         if (!rc) {
9930                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9931                                 "0462 PCI enable MSI mode success.\n");
9932         } else {
9933                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9934                                 "0471 PCI enable MSI mode failed (%d)\n", rc);
9935                 return rc;
9936         }
9937
9938         rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
9939                          0, LPFC_DRIVER_NAME, phba);
9940         if (rc) {
9941                 pci_disable_msi(phba->pcidev);
9942                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9943                                 "0478 MSI request_irq failed (%d)\n", rc);
9944         }
9945         return rc;
9946 }
9947
9948 /**
9949  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
9950  * @phba: pointer to lpfc hba data structure.
9951  * @cfg_mode: configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
9952  *
9953  * This routine is invoked to enable device interrupt and associate the
9954  * driver's interrupt handler(s) with interrupt vector(s) for a device with
9955  * SLI-3 interface spec. Depending on the interrupt mode configured for the
9956  * driver, it will try to fall back from the configured interrupt mode to an
9957  * interrupt mode supported by the platform, kernel, and device, in the
9958  * order: MSI-X -> MSI -> INTx.
9959  *
9960  * Return codes
9961  *   0, 1 or 2 - the interrupt mode actually enabled (INTx, MSI, MSI-X)
9962  *   LPFC_INTR_ERROR - failed to enable any interrupt mode
9963  **/
9964 static uint32_t
9965 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9966 {
9967         uint32_t intr_mode = LPFC_INTR_ERROR;
9968         int retval;
9969
9970         if (cfg_mode == 2) {
9971                 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
9972                 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
9973                 if (!retval) {
9974                         /* Now, try to enable MSI-X interrupt mode */
9975                         retval = lpfc_sli_enable_msix(phba);
9976                         if (!retval) {
9977                                 /* Indicate initialization to MSI-X mode */
9978                                 phba->intr_type = MSIX;
9979                                 intr_mode = 2;
9980                         }
9981                 }
9982         }
9983
9984         /* Fallback to MSI if MSI-X initialization failed */
9985         if (cfg_mode >= 1 && phba->intr_type == NONE) {
9986                 retval = lpfc_sli_enable_msi(phba);
9987                 if (!retval) {
9988                         /* Indicate initialization to MSI mode */
9989                         phba->intr_type = MSI;
9990                         intr_mode = 1;
9991                 }
9992         }
9993
9994         /* Fallback to INTx if both MSI-X/MSI initialization failed */
9995         if (phba->intr_type == NONE) {
9996                 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
9997                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9998                 if (!retval) {
9999                         /* Indicate initialization to INTx mode */
10000                         phba->intr_type = INTx;
10001                         intr_mode = 0;
10002                 }
10003         }
10004         return intr_mode;
10005 }
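
/*
 * For reference, a sketch of the fallback ladder implemented above; the
 * returned intr_mode encodes the mode actually brought up, regardless of
 * the cfg_mode that was requested:
 *
 *	cfg_mode requested	modes attempted		intr_mode on success
 *	2 (MSI-X)		MSI-X, MSI, INTx	2, 1, or 0
 *	1 (MSI)			MSI, INTx		1 or 0
 *	0 (INTx)		INTx only		0
 *
 * LPFC_INTR_ERROR is returned only when every attempted mode fails.
 */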
10006
10007 /**
10008  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
10009  * @phba: pointer to lpfc hba data structure.
10010  *
10011  * This routine is invoked to disable device interrupts and disassociate the
10012  * driver's interrupt handler(s) from the interrupt vector(s) of a device with
10013  * the SLI-3 interface spec. Depending on the interrupt mode, the driver will
10014  * release the interrupt vector(s) for the message signaled interrupt.
10015  **/
10016 static void
10017 lpfc_sli_disable_intr(struct lpfc_hba *phba)
10018 {
10019         int nr_irqs, i;
10020
10021         if (phba->intr_type == MSIX)
10022                 nr_irqs = LPFC_MSIX_VECTORS;
10023         else
10024                 nr_irqs = 1;
10025
10026         for (i = 0; i < nr_irqs; i++)
10027                 free_irq(pci_irq_vector(phba->pcidev, i), phba);
10028         pci_free_irq_vectors(phba->pcidev);
10029
10030         /* Reset interrupt management states */
10031         phba->intr_type = NONE;
10032         phba->sli.slistat.sli_intr = 0;
10033 }
10034
10035 /**
10036  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
10037  * @phba: pointer to lpfc hba data structure.
10038  * @vectors: number of msix vectors allocated.
10039  *
10040  * The routine will figure out the CPU affinity assignment for every
10041  * MSI-X vector allocated for the HBA.  The hba_eq_hdl will be updated
10042  * with a pointer to the CPU mask that defines ALL the CPUs this vector
10043  * can be associated with. If the vector can be uniquely associated with
10044  * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
10045  * In addition, the CPU to IO channel mapping will be calculated
10046  * and the phba->sli4_hba.cpu_map array will reflect this.
10047  */
10048 static void
10049 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10050 {
10051         struct lpfc_vector_map_info *cpup;
10052         int index = 0;
10053         int vec = 0;
10054         int cpu;
10055 #ifdef CONFIG_X86
10056         struct cpuinfo_x86 *cpuinfo;
10057 #endif
10058
10059         /* Init cpu_map array */
10060         memset(phba->sli4_hba.cpu_map, 0xff,
10061                (sizeof(struct lpfc_vector_map_info) *
10062                phba->sli4_hba.num_present_cpu));
10063
10064         /* Update CPU map with physical id and core id of each CPU */
10065         cpup = phba->sli4_hba.cpu_map;
10066         for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
10067 #ifdef CONFIG_X86
10068                 cpuinfo = &cpu_data(cpu);
10069                 cpup->phys_id = cpuinfo->phys_proc_id;
10070                 cpup->core_id = cpuinfo->cpu_core_id;
10071 #else
10072                 /* No distinction between CPUs for other platforms */
10073                 cpup->phys_id = 0;
10074                 cpup->core_id = 0;
10075 #endif
10076                 cpup->channel_id = index;  /* For now round robin */
10077                 cpup->irq = pci_irq_vector(phba->pcidev, vec);
10078                 vec++;
10079                 if (vec >= vectors)
10080                         vec = 0;
10081                 index++;
10082                 if (index >= phba->cfg_fcp_io_channel)
10083                         index = 0;
10084                 cpup++;
10085         }
10086 }
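
/*
 * Worked example of the round-robin assignment above, assuming a
 * hypothetical host with 8 present CPUs, 4 allocated vectors, and
 * cfg_fcp_io_channel == 4:
 *
 *	cpu 0..3 -> irq vector 0..3, channel_id 0..3
 *	cpu 4..7 -> irq vector 0..3, channel_id 0..3
 *
 * Both vec and index simply wrap at their respective limits, so every
 * CPU is given a vector and an IO channel even when the CPUs outnumber
 * both.
 */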
10087
10088
10089 /**
10090  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
10091  * @phba: pointer to lpfc hba data structure.
10092  *
10093  * This routine is invoked to enable the MSI-X interrupt vectors to device
10094  * with SLI-4 interface spec.
10095  *
10096  * Return codes
10097  * 0 - successful
10098  * other values - error
10099  **/
10100 static int
10101 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
10102 {
10103         int vectors, rc, index;
10104         char *name;
10105
10106         /* Set up MSI-X multi-message vectors */
10107         vectors = phba->io_channel_irqs;
10108         if (phba->cfg_fof)
10109                 vectors++;
10110
10111         rc = pci_alloc_irq_vectors(phba->pcidev,
10112                                 (phba->nvmet_support) ? 1 : 2,
10113                                 vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
10114         if (rc < 0) {
10115                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10116                                 "0484 PCI enable MSI-X failed (%d)\n", rc);
10117                 goto vec_fail_out;
10118         }
10119         vectors = rc;
10120
10121         /* Assign MSI-X vectors to interrupt handlers */
10122         for (index = 0; index < vectors; index++) {
10123                 name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
10124                 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
10125                 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
10126                          LPFC_DRIVER_HANDLER_NAME"%d", index);
10127
10128                 phba->sli4_hba.hba_eq_hdl[index].idx = index;
10129                 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
10130                 atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
10131                 if (phba->cfg_fof && (index == (vectors - 1)))
10132                         rc = request_irq(pci_irq_vector(phba->pcidev, index),
10133                                  &lpfc_sli4_fof_intr_handler, 0,
10134                                  name,
10135                                  &phba->sli4_hba.hba_eq_hdl[index]);
10136                 else
10137                         rc = request_irq(pci_irq_vector(phba->pcidev, index),
10138                                  &lpfc_sli4_hba_intr_handler, 0,
10139                                  name,
10140                                  &phba->sli4_hba.hba_eq_hdl[index]);
10141                 if (rc) {
10142                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10143                                         "0486 MSI-X fast-path (%d) "
10144                                         "request_irq failed (%d)\n", index, rc);
10145                         goto cfg_fail_out;
10146                 }
10147         }
10148
10149         if (phba->cfg_fof)
10150                 vectors--;
10151
10152         if (vectors != phba->io_channel_irqs) {
10153                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10154                                 "3238 Reducing IO channels to match number of "
10155                                 "MSI-X vectors, requested %d got %d\n",
10156                                 phba->io_channel_irqs, vectors);
10157                 if (phba->cfg_fcp_io_channel > vectors)
10158                         phba->cfg_fcp_io_channel = vectors;
10159                 if (phba->cfg_nvme_io_channel > vectors)
10160                         phba->cfg_nvme_io_channel = vectors;
10161                 if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
10162                         phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10163                 else
10164                         phba->io_channel_irqs = phba->cfg_nvme_io_channel;
10165         }
10166         lpfc_cpu_affinity_check(phba, vectors);
10167
10168         return rc;
10169
10170 cfg_fail_out:
10171         /* free the irq already requested */
10172         for (--index; index >= 0; index--)
10173                 free_irq(pci_irq_vector(phba->pcidev, index),
10174                                 &phba->sli4_hba.hba_eq_hdl[index]);
10175
10176         /* Unconfigure MSI-X capability structure */
10177         pci_free_irq_vectors(phba->pcidev);
10178
10179 vec_fail_out:
10180         return rc;
10181 }
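
/*
 * A minimal sketch of the pci_alloc_irq_vectors() contract relied upon
 * above, using hypothetical bounds for illustration:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 2, 16,
 *				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 *	if (nvec < 0)
 *		return nvec;	(not even the minimum 2 were available)
 *
 * On success, 2 <= nvec <= 16 and the caller must size its queues to the
 * count actually granted, which is exactly what the io_channel trimming
 * above does; PCI_IRQ_AFFINITY additionally spreads the vectors across
 * the present CPUs at allocation time.
 */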
10182
10183 /**
10184  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
10185  * @phba: pointer to lpfc hba data structure.
10186  *
10187  * This routine is invoked to enable the MSI interrupt mode to device with
10188  * SLI-4 interface spec. The kernel function pci_enable_msi() is called
10189  * to enable the MSI vector. The device driver is responsible for calling
10190  * request_irq() to register the MSI vector with an interrupt handler,
10191  * which is done in this function.
10192  *
10193  * Return codes
10194  *      0 - successful
10195  *      other values - error
10196  **/
10197 static int
10198 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
10199 {
10200         int rc, index;
10201
10202         rc = pci_enable_msi(phba->pcidev);
10203         if (!rc) {
10204                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10205                                 "0487 PCI enable MSI mode success.\n");
10206         } else {
10207                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10208                                 "0488 PCI enable MSI mode failed (%d)\n", rc);
10209                 return rc;
10210         }
10211
10212         rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
10213                          0, LPFC_DRIVER_NAME, phba);
10214         if (rc) {
10215                 pci_disable_msi(phba->pcidev);
10216                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10217                                 "0490 MSI request_irq failed (%d)\n", rc);
10218                 return rc;
10219         }
10220
10221         for (index = 0; index < phba->io_channel_irqs; index++) {
10222                 phba->sli4_hba.hba_eq_hdl[index].idx = index;
10223                 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
10224         }
10225
10226         if (phba->cfg_fof) {
10227                 phba->sli4_hba.hba_eq_hdl[index].idx = index;
10228                 phba->sli4_hba.hba_eq_hdl[index].phba = phba;
10229         }
10230         return 0;
10231 }
10232
10233 /**
10234  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
10235  * @phba: pointer to lpfc hba data structure.
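 * @cfg_mode: requested interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).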
10236  *
10237  * This routine is invoked to enable device interrupts and associate the
10238  * driver's interrupt handler(s) with interrupt vector(s) of a device with
10239  * the SLI-4 interface spec. Depending on the interrupt mode configured for
10240  * the driver, the driver will try to fall back from the configured interrupt
10241  * mode to an interrupt mode supported by the platform, kernel, and device,
10242  * in the order:
10243  * MSI-X -> MSI -> INTx.
10244  *
10245  * Return codes
10246  *      intr_mode (2, 1, or 0) - the interrupt mode actually enabled
10247  *      LPFC_INTR_ERROR - no interrupt mode could be enabled
10248  **/
10249 static uint32_t
10250 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
10251 {
10252         uint32_t intr_mode = LPFC_INTR_ERROR;
10253         int retval, idx;
10254
10255         if (cfg_mode == 2) {
10256                 /* Now, try to enable MSI-X interrupt mode */
10257                 retval = lpfc_sli4_enable_msix(phba);
10258                 if (!retval) {
10259                         /* Indicate initialization to MSI-X mode */
10260                         phba->intr_type = MSIX;
10261                         intr_mode = 2;
10262                 }
10267         }
10268
10269         /* Fall back to MSI if MSI-X initialization failed */
10270         if (cfg_mode >= 1 && phba->intr_type == NONE) {
10271                 retval = lpfc_sli4_enable_msi(phba);
10272                 if (!retval) {
10273                         /* Indicate initialization to MSI mode */
10274                         phba->intr_type = MSI;
10275                         intr_mode = 1;
10276                 }
10277         }
10278
10279         /* Fall back to INTx if both MSI-X and MSI initialization failed */
10280         if (phba->intr_type == NONE) {
10281                 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
10282                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
10283                 if (!retval) {
10284                         struct lpfc_hba_eq_hdl *eqhdl;
10285
10286                         /* Indicate initialization to INTx mode */
10287                         phba->intr_type = INTx;
10288                         intr_mode = 0;
10289
10290                         for (idx = 0; idx < phba->io_channel_irqs; idx++) {
10291                                 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
10292                                 eqhdl->idx = idx;
10293                                 eqhdl->phba = phba;
10294                                 atomic_set(&eqhdl->hba_eq_in_use, 1);
10295                         }
10296                         if (phba->cfg_fof) {
10297                                 eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
10298                                 eqhdl->idx = idx;
10299                                 eqhdl->phba = phba;
10300                                 atomic_set(&eqhdl->hba_eq_in_use, 1);
10301                         }
10302                 }
10303         }
10304         return intr_mode;
10305 }
10306
10307 /**
10308  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
10309  * @phba: pointer to lpfc hba data structure.
10310  *
10311  * This routine is invoked to disable device interrupts and disassociate
10312  * the driver's interrupt handler(s) from the interrupt vector(s) of a device
10313  * with the SLI-4 interface spec. Depending on the interrupt mode, the driver
10314  * will release the interrupt vector(s) for the message signaled interrupt.
10315  **/
10316 static void
10317 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
10318 {
10319         /* Disable the currently initialized interrupt mode */
10320         if (phba->intr_type == MSIX) {
10321                 int index;
10322
10323                 /* Free up MSI-X multi-message vectors */
10324                 for (index = 0; index < phba->io_channel_irqs; index++)
10325                         free_irq(pci_irq_vector(phba->pcidev, index),
10326                                         &phba->sli4_hba.hba_eq_hdl[index]);
10327
10328                 if (phba->cfg_fof)
10329                         free_irq(pci_irq_vector(phba->pcidev, index),
10330                                         &phba->sli4_hba.hba_eq_hdl[index]);
10331         } else {
10332                 free_irq(phba->pcidev->irq, phba);
10333         }
10334
10335         pci_free_irq_vectors(phba->pcidev);
10336
10337         /* Reset interrupt management states */
10338         phba->intr_type = NONE;
10339         phba->sli.slistat.sli_intr = 0;
10340 }
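
/*
 * Note the teardown ordering used by both disable paths: every handler
 * registered with request_irq() is released with free_irq() first, and
 * only then are the underlying vectors returned with
 * pci_free_irq_vectors(); releasing the vectors while handlers are still
 * registered would be an API misuse the PCI core warns about.
 */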
10341
10342 /**
10343  * lpfc_unset_hba - Unset SLI3 hba device initialization
10344  * @phba: pointer to lpfc hba data structure.
10345  *
10346  * This routine is invoked to unset the HBA device initialization steps to
10347  * a device with SLI-3 interface spec.
10348  **/
10349 static void
10350 lpfc_unset_hba(struct lpfc_hba *phba)
10351 {
10352         struct lpfc_vport *vport = phba->pport;
10353         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
10354
10355         spin_lock_irq(shost->host_lock);
10356         vport->load_flag |= FC_UNLOADING;
10357         spin_unlock_irq(shost->host_lock);
10358
10359         kfree(phba->vpi_bmask);
10360         kfree(phba->vpi_ids);
10361
10362         lpfc_stop_hba_timers(phba);
10363
10364         phba->pport->work_port_events = 0;
10365
10366         lpfc_sli_hba_down(phba);
10367
10368         lpfc_sli_brdrestart(phba);
10369
10370         lpfc_sli_disable_intr(phba);
10371
10372         return;
10373 }
10374
10375 /**
10376  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
10377  * @phba: Pointer to HBA context object.
10378  *
10379  * This function is called in the SLI4 code path to wait for completion
10380  * of the device's XRI exchange busy events. It checks the XRI exchange
10381  * busy status on outstanding FCP, ELS, and NVME I/Os every 10 ms for up
10382  * to 10 seconds; after that, it checks the status every 30 seconds,
10383  * logging an error message each time, and waits indefinitely. Only when
10384  * all XRI exchange busy events have completed will the driver unload
10385  * proceed to issue the function reset mailbox command to the CNA and
10386  * release the rest of the driver unload resources.
10387  **/
10388 static void
10389 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
10390 {
10391         int wait_time = 0;
10392         int nvme_xri_cmpl = 1;
10393         int nvmet_xri_cmpl = 1;
10394         int fcp_xri_cmpl = 1;
10395         int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
10396
10397         /* The driver just aborted I/Os during the hba_unset process.  Pause
10398          * here to give the HBA time to complete the I/Os and get entries
10399          * into the abts lists.
10400          */
10401         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
10402
10403         /* Wait for NVME pending IO to flush back to transport. */
10404         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
10405                 lpfc_nvme_wait_for_io_drain(phba);
10406
10407         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10408                 fcp_xri_cmpl =
10409                         list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
10410         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10411                 nvme_xri_cmpl =
10412                         list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
10413                 nvmet_xri_cmpl =
10414                         list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
10415         }
10416
10417         while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
10418                !nvmet_xri_cmpl) {
10419                 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
10420                         if (!nvmet_xri_cmpl)
10421                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10422                                                 "6424 NVMET XRI exchange busy "
10423                                                 "wait time: %d seconds.\n",
10424                                                 wait_time/1000);
10425                         if (!nvme_xri_cmpl)
10426                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10427                                                 "6100 NVME XRI exchange busy "
10428                                                 "wait time: %d seconds.\n",
10429                                                 wait_time/1000);
10430                         if (!fcp_xri_cmpl)
10431                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10432                                                 "2877 FCP XRI exchange busy "
10433                                                 "wait time: %d seconds.\n",
10434                                                 wait_time/1000);
10435                         if (!els_xri_cmpl)
10436                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10437                                                 "2878 ELS XRI exchange busy "
10438                                                 "wait time: %d seconds.\n",
10439                                                 wait_time/1000);
10440                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
10441                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
10442                 } else {
10443                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
10444                         wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
10445                 }
10446                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10447                         nvme_xri_cmpl = list_empty(
10448                                 &phba->sli4_hba.lpfc_abts_nvme_buf_list);
10449                         nvmet_xri_cmpl = list_empty(
10450                                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
10451                 }
10452
10453                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10454                         fcp_xri_cmpl = list_empty(
10455                                 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
10456
10457                 els_xri_cmpl =
10458                         list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
10459
10460         }
10461 }
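
/*
 * Timing sketch for the wait loop above, using the intervals documented
 * in the function header: after an initial 50 ms settle delay
 * (5 * LPFC_XRI_EXCH_BUSY_WAIT_T1), a port whose XRIs drain within the
 * first 10 seconds is polled every 10 ms; a port that stays busy longer
 * is then polled every 30 seconds, logging the accumulated wait time on
 * each pass, with no upper bound on the total wait.
 */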
10462
10463 /**
10464  * lpfc_sli4_hba_unset - Unset the fcoe hba
10465  * @phba: Pointer to HBA context object.
10466  *
10467  * This function is called in the SLI4 code path to reset the HBA's FCoE
10468  * function. The caller is not required to hold any lock. This routine
10469  * issues PCI function reset mailbox command to reset the FCoE function.
10470  * At the end of the function, it calls lpfc_hba_down_post function to
10471  * free any pending commands.
10472  **/
10473 static void
10474 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
10475 {
10476         int wait_cnt = 0;
10477         LPFC_MBOXQ_t *mboxq;
10478         struct pci_dev *pdev = phba->pcidev;
10479
10480         lpfc_stop_hba_timers(phba);
10481         phba->sli4_hba.intr_enable = 0;
10482
10483         /*
10484          * Gracefully wait out any currently outstanding asynchronous
10485          * mailbox command.
10486          */
10487
10488         /* First, block any pending async mailbox command from being posted */
10489         spin_lock_irq(&phba->hbalock);
10490         phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10491         spin_unlock_irq(&phba->hbalock);
10492         /* Now, try to wait it out if we can */
10493         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10494                 msleep(10);
10495                 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
10496                         break;
10497         }
10498         /* Forcefully release the outstanding mailbox command if timed out */
10499         if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10500                 spin_lock_irq(&phba->hbalock);
10501                 mboxq = phba->sli.mbox_active;
10502                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10503                 __lpfc_mbox_cmpl_put(phba, mboxq);
10504                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10505                 phba->sli.mbox_active = NULL;
10506                 spin_unlock_irq(&phba->hbalock);
10507         }
10508
10509         /* Abort all iocbs associated with the hba */
10510         lpfc_sli_hba_iocb_abort(phba);
10511
10512         /* Wait for completion of device XRI exchange busy */
10513         lpfc_sli4_xri_exchange_busy_wait(phba);
10514
10515         /* Disable PCI subsystem interrupt */
10516         lpfc_sli4_disable_intr(phba);
10517
10518         /* Disable SR-IOV if enabled */
10519         if (phba->cfg_sriov_nr_virtfn)
10520                 pci_disable_sriov(pdev);
10521
10522         /* Stopping the kthread will trigger work_done one more time */
10523         kthread_stop(phba->worker_thread);
10524
10525         /* Disable FW logging to host memory */
10526         writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
10527                phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
10528
10529         /* Free RAS DMA memory */
10530         if (phba->ras_fwlog.ras_enabled)
10531                 lpfc_sli4_ras_dma_free(phba);
10532
10533         /* Unset the queues shared with the hardware then release all
10534          * allocated resources.
10535          */
10536         lpfc_sli4_queue_unset(phba);
10537         lpfc_sli4_queue_destroy(phba);
10538
10539         /* Reset SLI4 HBA FCoE function */
10540         lpfc_pci_function_reset(phba);
10541
10542         /* Stop the SLI4 device port */
10543         phba->pport->work_port_events = 0;
10544 }
10545
10546 /**
10547  * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
10548  * @phba: Pointer to HBA context object.
10549  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
10550  *
10551  * This function is called in the SLI4 code path to read the port's
10552  * sli4 capabilities.
10553  *
10554  * This function may be called from any context that can block-wait
10555  * for the completion.  The expectation is that this routine is called
10556  * typically from probe_one or from the online routine.
10557  **/
10558 int
10559 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10560 {
10561         int rc;
10562         struct lpfc_mqe *mqe;
10563         struct lpfc_pc_sli4_params *sli4_params;
10564         uint32_t mbox_tmo;
10565
10566         rc = 0;
10567         mqe = &mboxq->u.mqe;
10568
10569         /* Read the port's SLI4 Parameters port capabilities */
10570         lpfc_pc_sli4_params(mboxq);
10571         if (!phba->sli4_hba.intr_enable) {
10572                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10573         } else {
10574                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
10575                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
10576         }
10577
10578         if (unlikely(rc))
10579                 return 1;
10580
10581         sli4_params = &phba->sli4_hba.pc_sli4_params;
10582         sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
10583         sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
10584         sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
10585         sli4_params->featurelevel_1 = bf_get(featurelevel_1,
10586                                              &mqe->un.sli4_params);
10587         sli4_params->featurelevel_2 = bf_get(featurelevel_2,
10588                                              &mqe->un.sli4_params);
10589         sli4_params->proto_types = mqe->un.sli4_params.word3;
10590         sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
10591         sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
10592         sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
10593         sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
10594         sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
10595         sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
10596         sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
10597         sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
10598         sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
10599         sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
10600         sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
10601         sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
10602         sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
10603         sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
10604         sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
10605         sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
10606         sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
10607         sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
10608         sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
10609         sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
10610
10611         /* Make sure that sge_supp_len can be handled by the driver */
10612         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
10613                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
10614
10615         return rc;
10616 }
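
/*
 * The bf_get() accessor used heavily above and below is the driver's
 * bitfield helper from lpfc_hw4.h; conceptually, for a field <name> it
 * evaluates to
 *
 *	((ptr)-><name>_WORD >> <name>_SHIFT) & <name>_MASK
 *
 * so each call extracts one architected field from the mailbox response
 * words without open-coding the shift and mask at every call site.
 */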
10617
10618 /**
10619  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
10620  * @phba: Pointer to HBA context object.
10621  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
10622  *
10623  * This function is called in the SLI4 code path to read the port's
10624  * sli4 capabilities.
10625  *
10626  * This function may be called from any context that can block-wait
10627  * for the completion.  The expectation is that this routine is called
10628  * typically from probe_one or from the online routine.
10629  **/
10630 int
10631 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10632 {
10633         int rc;
10634         struct lpfc_mqe *mqe = &mboxq->u.mqe;
10635         struct lpfc_pc_sli4_params *sli4_params;
10636         uint32_t mbox_tmo;
10637         int length;
10638         bool exp_wqcq_pages = true;
10639         struct lpfc_sli4_parameters *mbx_sli4_parameters;
10640
10641         /*
10642          * By default, the driver assumes the SLI4 port requires RPI
10643          * header postings.  The SLI4_PARAM response will correct this
10644          * assumption.
10645          */
10646         phba->sli4_hba.rpi_hdrs_in_use = 1;
10647
10648         /* Read the port's SLI4 Config Parameters */
10649         length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
10650                   sizeof(struct lpfc_sli4_cfg_mhdr));
10651         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
10652                          LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
10653                          length, LPFC_SLI4_MBX_EMBED);
10654         if (!phba->sli4_hba.intr_enable) {
10655                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10656         } else {
10657                 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
10658                 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
10659         }
10660         if (unlikely(rc))
10661                 return rc;
10662         sli4_params = &phba->sli4_hba.pc_sli4_params;
10663         mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
10664         sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
10665         sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
10666         sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
10667         sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
10668                                              mbx_sli4_parameters);
10669         sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
10670                                              mbx_sli4_parameters);
10671         if (bf_get(cfg_phwq, mbx_sli4_parameters))
10672                 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
10673         else
10674                 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
10675         sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
10676         sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
10677         sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
10678         sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
10679         sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
10680         sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
10681         sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
10682         sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
10683         sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
10684         sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
10685         sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
10686         sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
10687                                             mbx_sli4_parameters);
10688         sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
10689         sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
10690                                            mbx_sli4_parameters);
10691         phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
10692         phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
10693         phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
10694                               bf_get(cfg_xib, mbx_sli4_parameters));
10695
10696         if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
10697             !phba->nvme_support) {
10698                 phba->nvme_support = 0;
10699                 phba->nvmet_support = 0;
10700                 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
10701                 phba->cfg_nvme_io_channel = 0;
10702                 phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10703                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
10704                                 "6101 Disabling NVME support: "
10705                                 "Not supported by firmware: %d %d\n",
10706                                 bf_get(cfg_nvme, mbx_sli4_parameters),
10707                                 bf_get(cfg_xib, mbx_sli4_parameters));
10708
10709                 /* If firmware doesn't support NVME, just use SCSI support */
10710                 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
10711                         return -ENODEV;
10712                 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
10713         }
10714
10715         /* Only embed PBDE for if_type 6; PBDE support requires xib to be set */
10716         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
10717             LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
10718                 phba->cfg_enable_pbde = 0;
10719
10720         /*
10721          * To support Suppress Response feature we must satisfy 3 conditions.
10722          * lpfc_suppress_rsp module parameter must be set (default).
10723          * In SLI4-Parameters Descriptor:
10724          * Extended Inline Buffers (XIB) must be supported.
10725          * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
10726          * (double negative).
10727          */
10728         if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
10729             !(bf_get(cfg_nosr, mbx_sli4_parameters)))
10730                 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
10731         else
10732                 phba->cfg_suppress_rsp = 0;
10733
10734         if (bf_get(cfg_eqdr, mbx_sli4_parameters))
10735                 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
10736
10737         /* Make sure that sge_supp_len can be handled by the driver */
10738         if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
10739                 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
10740
10741         /*
10742          * Check whether the adapter supports an embedded copy of the
10743          * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
10744          * to use this option, 128-byte WQEs must be used.
10745          */
10746         if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
10747                 phba->fcp_embed_io = 1;
10748         else
10749                 phba->fcp_embed_io = 0;
10750
10751         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
10752                         "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
10753                         bf_get(cfg_xib, mbx_sli4_parameters),
10754                         phba->cfg_enable_pbde,
10755                         phba->fcp_embed_io, phba->nvme_support,
10756                         phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
10757
10758         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
10759             LPFC_SLI_INTF_IF_TYPE_2) &&
10760             (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
10761                  LPFC_SLI_INTF_FAMILY_LNCR_A0))
10762                 exp_wqcq_pages = false;
10763
10764         if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
10765             (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
10766             exp_wqcq_pages &&
10767             (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
10768                 phba->enab_exp_wqcq_pages = 1;
10769         else
10770                 phba->enab_exp_wqcq_pages = 0;
10771         /*
10772          * Check if the SLI port supports MDS Diagnostics
10773          */
10774         if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
10775                 phba->mds_diags_support = 1;
10776         else
10777                 phba->mds_diags_support = 0;
10778
10779         return 0;
10780 }
10781
10782 /**
10783  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
10784  * @pdev: pointer to PCI device
10785  * @pid: pointer to PCI device identifier
10786  *
10787  * This routine is to be called to attach a device with SLI-3 interface spec
10788  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
10789  * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
10790  * device-specific information and checks whether the driver states that it
10791  * can support this kind of device. If the match is successful, the driver core
10792  * invokes this routine. If this routine determines it can claim the HBA, it
10793  * does all the initialization that it needs to do to handle the HBA properly.
10794  *
10795  * Return code
10796  *      0 - driver can claim the device
10797  *      negative value - driver can not claim the device
10798  **/
10799 static int
10800 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
10801 {
10802         struct lpfc_hba   *phba;
10803         struct lpfc_vport *vport = NULL;
10804         struct Scsi_Host  *shost = NULL;
10805         int error;
10806         uint32_t cfg_mode, intr_mode;
10807
10808         /* Allocate memory for HBA structure */
10809         phba = lpfc_hba_alloc(pdev);
10810         if (!phba)
10811                 return -ENOMEM;
10812
10813         /* Perform generic PCI device enabling operation */
10814         error = lpfc_enable_pci_dev(phba);
10815         if (error)
10816                 goto out_free_phba;
10817
10818         /* Set up SLI API function jump table for PCI-device group-0 HBAs */
10819         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
10820         if (error)
10821                 goto out_disable_pci_dev;
10822
10823         /* Set up SLI-3 specific device PCI memory space */
10824         error = lpfc_sli_pci_mem_setup(phba);
10825         if (error) {
10826                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10827                                 "1402 Failed to set up pci memory space.\n");
10828                 goto out_disable_pci_dev;
10829         }
10830
10831         /* Set up SLI-3 specific device driver resources */
10832         error = lpfc_sli_driver_resource_setup(phba);
10833         if (error) {
10834                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10835                                 "1404 Failed to set up driver resource.\n");
10836                 goto out_unset_pci_mem_s3;
10837         }
10838
10839         /* Initialize and populate the iocb list per host */
10840
10841         error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
10842         if (error) {
10843                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10844                                 "1405 Failed to initialize iocb list.\n");
10845                 goto out_unset_driver_resource_s3;
10846         }
10847
10848         /* Set up common device driver resources */
10849         error = lpfc_setup_driver_resource_phase2(phba);
10850         if (error) {
10851                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10852                                 "1406 Failed to set up driver resource.\n");
10853                 goto out_free_iocb_list;
10854         }
10855
10856         /* Get the default values for Model Name and Description */
10857         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
10858
10859         /* Create SCSI host to the physical port */
10860         error = lpfc_create_shost(phba);
10861         if (error) {
10862                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10863                                 "1407 Failed to create scsi host.\n");
10864                 goto out_unset_driver_resource;
10865         }
10866
10867         /* Configure sysfs attributes */
10868         vport = phba->pport;
10869         error = lpfc_alloc_sysfs_attr(vport);
10870         if (error) {
10871                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10872                                 "1476 Failed to allocate sysfs attr\n");
10873                 goto out_destroy_shost;
10874         }
10875
10876         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
10877         /* Now, try to enable interrupts and bring up the device */
10878         cfg_mode = phba->cfg_use_msi;
10879         while (true) {
10880                 /* Put device to a known state before enabling interrupt */
10881                 lpfc_stop_port(phba);
10882                 /* Configure and enable interrupt */
10883                 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
10884                 if (intr_mode == LPFC_INTR_ERROR) {
10885                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10886                                         "0431 Failed to enable interrupt.\n");
10887                         error = -ENODEV;
10888                         goto out_free_sysfs_attr;
10889                 }
10890                 /* SLI-3 HBA setup */
10891                 if (lpfc_sli_hba_setup(phba)) {
10892                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10893                                         "1477 Failed to set up hba\n");
10894                         error = -ENODEV;
10895                         goto out_remove_device;
10896                 }
10897
10898                 /* Wait 50ms for the interrupts of previous mailbox commands */
10899                 msleep(50);
10900                 /* Check active interrupts on message signaled interrupts */
10901                 if (intr_mode == 0 ||
10902                     phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
10903                         /* Log the current active interrupt mode */
10904                         phba->intr_mode = intr_mode;
10905                         lpfc_log_intr_mode(phba, intr_mode);
10906                         break;
10907                 } else {
10908                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10909                                         "0447 Configure interrupt mode (%d) "
10910                                         "failed active interrupt test.\n",
10911                                         intr_mode);
10912                         /* Disable the current interrupt mode */
10913                         lpfc_sli_disable_intr(phba);
10914                         /* Try next level of interrupt mode */
10915                         cfg_mode = --intr_mode;
10916                 }
10917         }
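        /*
         * Example of the ladder above: a request for MSI-X (cfg_mode 2)
         * that enables but never fires an interrupt is torn down and
         * retried as MSI (cfg_mode 1), and finally as INTx (cfg_mode 0),
         * which always passes the active-interrupt test.
         */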
10918
10919         /* Perform post initialization setup */
10920         lpfc_post_init_setup(phba);
10921
10922         /* Check if there are static vports to be created. */
10923         lpfc_create_static_vport(phba);
10924
10925         return 0;
10926
10927 out_remove_device:
10928         lpfc_unset_hba(phba);
10929 out_free_sysfs_attr:
10930         lpfc_free_sysfs_attr(vport);
10931 out_destroy_shost:
10932         lpfc_destroy_shost(phba);
10933 out_unset_driver_resource:
10934         lpfc_unset_driver_resource_phase2(phba);
10935 out_free_iocb_list:
10936         lpfc_free_iocb_list(phba);
10937 out_unset_driver_resource_s3:
10938         lpfc_sli_driver_resource_unset(phba);
10939 out_unset_pci_mem_s3:
10940         lpfc_sli_pci_mem_unset(phba);
10941 out_disable_pci_dev:
10942         lpfc_disable_pci_dev(phba);
10943         if (shost)
10944                 scsi_host_put(shost);
10945 out_free_phba:
10946         lpfc_hba_free(phba);
10947         return error;
10948 }
10949
10950 /**
10951  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
10952  * @pdev: pointer to PCI device
10953  *
10954  * This routine is to be called to detach a device with SLI-3 interface
10955  * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
10956  * removed from PCI bus, it performs all the necessary cleanup for the HBA
10957  * device to be removed from the PCI subsystem properly.
10958  **/
10959 static void
10960 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
10961 {
10962         struct Scsi_Host  *shost = pci_get_drvdata(pdev);
10963         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
10964         struct lpfc_vport **vports;
10965         struct lpfc_hba   *phba = vport->phba;
10966         int i;
10967
10968         spin_lock_irq(&phba->hbalock);
10969         vport->load_flag |= FC_UNLOADING;
10970         spin_unlock_irq(&phba->hbalock);
10971
10972         lpfc_free_sysfs_attr(vport);
10973
10974         /* Release all the vports against this physical port */
10975         vports = lpfc_create_vport_work_array(phba);
10976         if (vports != NULL)
10977                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
10978                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
10979                                 continue;
10980                         fc_vport_terminate(vports[i]->fc_vport);
10981                 }
10982         lpfc_destroy_vport_work_array(phba, vports);
10983
10984         /* Remove FC host and then SCSI host with the physical port */
10985         fc_remove_host(shost);
10986         scsi_remove_host(shost);
10987
10988         lpfc_cleanup(vport);
10989
10990         /*
10991  * Bring down the SLI Layer. This step disables all interrupts,
10992          * clears the rings, discards all mailbox commands, and resets
10993          * the HBA.
10994          */
10995
10996         /* HBA interrupt will be disabled after this call */
10997         lpfc_sli_hba_down(phba);
10998         /* Stopping the kthread will trigger work_done one more time */
10999         kthread_stop(phba->worker_thread);
11000         /* Final cleanup of txcmplq and reset the HBA */
11001         lpfc_sli_brdrestart(phba);
11002
11003         kfree(phba->vpi_bmask);
11004         kfree(phba->vpi_ids);
11005
11006         lpfc_stop_hba_timers(phba);
11007         spin_lock_irq(&phba->port_list_lock);
11008         list_del_init(&vport->listentry);
11009         spin_unlock_irq(&phba->port_list_lock);
11010
11011         lpfc_debugfs_terminate(vport);
11012
11013         /* Disable SR-IOV if enabled */
11014         if (phba->cfg_sriov_nr_virtfn)
11015                 pci_disable_sriov(pdev);
11016
11017         /* Disable interrupt */
11018         lpfc_sli_disable_intr(phba);
11019
11020         scsi_host_put(shost);
11021
11022         /*
11023          * Call scsi_free before mem_free since scsi bufs are released to their
11024          * corresponding pools here.
11025          */
11026         lpfc_scsi_free(phba);
11027         lpfc_mem_free_all(phba);
11028
11029         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
11030                           phba->hbqslimp.virt, phba->hbqslimp.phys);
11031
11032         /* Free resources associated with SLI2 interface */
11033         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
11034                           phba->slim2p.virt, phba->slim2p.phys);
11035
11036         /* unmap adapter SLIM and Control Registers */
11037         iounmap(phba->ctrl_regs_memmap_p);
11038         iounmap(phba->slim_memmap_p);
11039
11040         lpfc_hba_free(phba);
11041
11042         pci_release_mem_regions(pdev);
11043         pci_disable_device(pdev);
11044 }
11045
11046 /**
11047  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
11048  * @pdev: pointer to PCI device
11049  * @msg: power management message
11050  *
11051  * This routine is to be called from the kernel's PCI subsystem to support
11052  * system Power Management (PM) for a device with the SLI-3 interface spec.
11053  * When PM invokes this method, it quiesces the device by stopping the
11054  * driver's worker thread, turning off the device's interrupts and DMA,
11055  * and bringing the device offline. Note that because the driver implements
11056  * only the minimum PM requirements for suspend/resume -- all possible PM
11057  * messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are
11058  * treated as SUSPEND, and the driver fully reinitializes its device during
11059  * the resume() method call -- the driver sets the device to the PCI_D3hot
11060  * state in PCI config space instead of setting it according to the @msg
11061  * provided by the PM.
11062  *
11063  * Return code
11064  *      0 - driver suspended the device
11065  *      Error otherwise
11066  **/
11067 static int
11068 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
11069 {
11070         struct Scsi_Host *shost = pci_get_drvdata(pdev);
11071         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11072
11073         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11074                         "0473 PCI device Power Management suspend.\n");
11075
11076         /* Bring down the device */
11077         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11078         lpfc_offline(phba);
11079         kthread_stop(phba->worker_thread);
11080
11081         /* Disable interrupt from device */
11082         lpfc_sli_disable_intr(phba);
11083
11084         /* Save device state to PCI config space */
11085         pci_save_state(pdev);
11086         pci_set_power_state(pdev, PCI_D3hot);
11087
11088         return 0;
11089 }
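
/*
 * The PCI_D3hot transition above is paired with lpfc_pci_resume_one_s3()
 * below, which moves the device back to PCI_D0 and restores the saved
 * config space before restarting the worker thread and reinitializing
 * the HBA.
 */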
11090
11091 /**
11092  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
11093  * @pdev: pointer to PCI device
11094  *
11095  * This routine is to be called from the kernel's PCI subsystem to support
11096  * system Power Management (PM) for a device with the SLI-3 interface spec.
11097  * When PM invokes this method, it restores the device's PCI config space
11098  * state, fully reinitializes the device, and brings it online. Note that
11099  * because the driver implements only the minimum PM requirements for
11100  * suspend/resume -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
11101  * to the suspend() method call are treated as SUSPEND, and the driver
11102  * fully reinitializes its device during the resume() method call -- the
11103  * device is set to PCI_D0 directly in PCI config space before restoring
11104  * the state.
11105  *
11106  * Return code
11107  *      0 - driver resumed the device
11108  *      Error otherwise
11109  **/
11110 static int
11111 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
11112 {
11113         struct Scsi_Host *shost = pci_get_drvdata(pdev);
11114         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11115         uint32_t intr_mode;
11116         int error;
11117
11118         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11119                         "0452 PCI device Power Management resume.\n");
11120
11121         /* Restore device state from PCI config space */
11122         pci_set_power_state(pdev, PCI_D0);
11123         pci_restore_state(pdev);
11124
11125         /*
11126          * Since pci_restore_state() clears the device's saved_state flag,
11127          * the restored state must be saved again.
11128          */
11129         pci_save_state(pdev);
11130
11131         if (pdev->is_busmaster)
11132                 pci_set_master(pdev);
11133
11134         /* Startup the kernel thread for this host adapter. */
11135         phba->worker_thread = kthread_run(lpfc_do_work, phba,
11136                                         "lpfc_worker_%d", phba->brd_no);
11137         if (IS_ERR(phba->worker_thread)) {
11138                 error = PTR_ERR(phba->worker_thread);
11139                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11140                                 "0434 PM resume failed to start worker "
11141                                 "thread: error=x%x.\n", error);
11142                 return error;
11143         }
11144
11145         /* Configure and enable interrupt */
11146         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
11147         if (intr_mode == LPFC_INTR_ERROR) {
11148                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11149                                 "0430 PM resume Failed to enable interrupt\n");
11150                 return -EIO;
11151         }
11152         phba->intr_mode = intr_mode;
11153
11154         /* Restart HBA and bring it online */
11155         lpfc_sli_brdrestart(phba);
11156         lpfc_online(phba);
11157
11158         /* Log the current active interrupt mode */
11159         lpfc_log_intr_mode(phba, phba->intr_mode);
11160
11161         return 0;
11162 }
11163
11164 /**
11165  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
11166  * @phba: pointer to lpfc hba data structure.
11167  *
11168  * This routine is called to prepare the SLI3 device for PCI slot recover. It
11169  * aborts all the outstanding SCSI I/Os to the pci device.
11170  **/
11171 static void
11172 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
11173 {
11174         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11175                         "2723 PCI channel I/O abort preparing for recovery\n");
11176
11177         /*
11178  * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
11179  * and let the SCSI mid-layer retry them to recover.
11180          */
11181         lpfc_sli_abort_fcp_rings(phba);
11182 }
11183
11184 /**
11185  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
11186  * @phba: pointer to lpfc hba data structure.
11187  *
11188  * This routine is called to prepare the SLI3 device for PCI slot reset. It
11189  * disables the device interrupt and pci device, and aborts the internal FCP
11190  * pending I/Os.
11191  **/
11192 static void
11193 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
11194 {
11195         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11196                         "2710 PCI channel disable preparing for reset\n");
11197
11198         /* Block any management I/Os to the device */
11199         lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
11200
11201         /* Block all SCSI devices' I/Os on the host */
11202         lpfc_scsi_dev_block(phba);
11203
11204         /* Flush all driver's outstanding SCSI I/Os as we are to reset */
11205         lpfc_sli_flush_fcp_rings(phba);
11206
11207         /* stop all timers */
11208         lpfc_stop_hba_timers(phba);
11209
11210         /* Disable interrupt and pci device */
11211         lpfc_sli_disable_intr(phba);
11212         pci_disable_device(phba->pcidev);
11213 }
11214
11215 /**
11216  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
11217  * @phba: pointer to lpfc hba data structure.
11218  *
11219  * This routine is called to prepare the SLI3 device for PCI slot permanently
11220  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
11221  * pending I/Os.
11222  **/
11223 static void
11224 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
11225 {
11226         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11227                         "2711 PCI channel permanent disable for failure\n");
11228         /* Block all SCSI devices' I/Os on the host */
11229         lpfc_scsi_dev_block(phba);
11230
11231         /* stop all timers */
11232         lpfc_stop_hba_timers(phba);
11233
11234         /* Clean up all driver's outstanding SCSI I/Os */
11235         lpfc_sli_flush_fcp_rings(phba);
11236 }
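
/*
 * Contrast between the three preparation routines above: the recover
 * path only aborts the outstanding FCP I/Os so the SCSI mid-layer can
 * retry them once the channel recovers, while the reset and permanent-
 * failure paths flush them back instead, since the device is about to
 * be reset or is gone for good.
 */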
11237
11238 /**
11239  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
11240  * @pdev: pointer to PCI device.
11241  * @state: the current PCI connection state.
11242  *
11243  * This routine is called from the PCI subsystem for I/O error handling to
11244  * device with SLI-3 interface spec. This function is called by the PCI
11245  * subsystem after a PCI bus error affecting this device has been detected.
11246  * When this function is invoked, it will need to stop all the I/Os and
11247  * interrupt(s) to the device. Once that is done, it will return
11248  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
11249  * as desired.
11250  *
11251  * Return codes
11252  *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
11253  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11254  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11255  **/
11256 static pci_ers_result_t
11257 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
11258 {
11259         struct Scsi_Host *shost = pci_get_drvdata(pdev);
11260         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11261
11262         switch (state) {
11263         case pci_channel_io_normal:
11264                 /* Non-fatal error, prepare for recovery */
11265                 lpfc_sli_prep_dev_for_recover(phba);
11266                 return PCI_ERS_RESULT_CAN_RECOVER;
11267         case pci_channel_io_frozen:
11268                 /* Fatal error, prepare for slot reset */
11269                 lpfc_sli_prep_dev_for_reset(phba);
11270                 return PCI_ERS_RESULT_NEED_RESET;
11271         case pci_channel_io_perm_failure:
11272                 /* Permanent failure, prepare for device down */
11273                 lpfc_sli_prep_dev_for_perm_failure(phba);
11274                 return PCI_ERS_RESULT_DISCONNECT;
11275         default:
11276                 /* Unknown state, prepare and request slot reset */
11277                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11278                                 "0472 Unknown PCI error state: x%x\n", state);
11279                 lpfc_sli_prep_dev_for_reset(phba);
11280                 return PCI_ERS_RESULT_NEED_RESET;
11281         }
11282 }
11283
11284 /**
11285  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
11286  * @pdev: pointer to PCI device.
11287  *
11288  * This routine is called from the PCI subsystem for error handling on a
11289  * device with the SLI-3 interface spec. It is called after the PCI bus has been
11290  * reset to restart the PCI card from scratch, as if from a cold-boot.
11291  * During the PCI subsystem error recovery, after driver returns
11292  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
11293  * recovery and then call this routine before calling the .resume method
11294  * to recover the device. This function will initialize the HBA device and
11295  * enable interrupts, but it will just put the HBA into an offline state
11296  * without passing any I/O traffic.
11297  *
11298  * Return codes
11299  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
11300  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11301  **/
11302 static pci_ers_result_t
11303 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
11304 {
11305         struct Scsi_Host *shost = pci_get_drvdata(pdev);
11306         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11307         struct lpfc_sli *psli = &phba->sli;
11308         uint32_t intr_mode;
11309
11310         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
11311         if (pci_enable_device_mem(pdev)) {
11312                 printk(KERN_ERR "lpfc: Cannot re-enable "
11313                         "PCI device after reset.\n");
11314                 return PCI_ERS_RESULT_DISCONNECT;
11315         }
11316
11317         pci_restore_state(pdev);
11318
11319         /*
11320          * pci_restore_state() now clears the device's saved_state flag, so
11321          * the restored state must be saved again.
11322          */
11323         pci_save_state(pdev);
11324
11325         if (pdev->is_busmaster)
11326                 pci_set_master(pdev);
11327
11328         spin_lock_irq(&phba->hbalock);
11329         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
11330         spin_unlock_irq(&phba->hbalock);
11331
11332         /* Configure and enable interrupt */
11333         intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
11334         if (intr_mode == LPFC_INTR_ERROR) {
11335                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11336                                 "0427 Cannot re-enable interrupt after "
11337                                 "slot reset.\n");
11338                 return PCI_ERS_RESULT_DISCONNECT;
11339         } else
11340                 phba->intr_mode = intr_mode;
11341
11342         /* Take device offline, it will perform cleanup */
11343         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11344         lpfc_offline(phba);
11345         lpfc_sli_brdrestart(phba);
11346
11347         /* Log the current active interrupt mode */
11348         lpfc_log_intr_mode(phba, phba->intr_mode);
11349
11350         return PCI_ERS_RESULT_RECOVERED;
11351 }
11352
11353 /**
11354  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
11355  * @pdev: pointer to PCI device
11356  *
11357  * This routine is called from the PCI subsystem for error handling on a device
11358  * with the SLI-3 interface spec. It is called when kernel error recovery tells
11359  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
11360  * error recovery. After this call, traffic can start to flow from this device
11361  * again.
11362  **/
11363 static void
11364 lpfc_io_resume_s3(struct pci_dev *pdev)
11365 {
11366         struct Scsi_Host *shost = pci_get_drvdata(pdev);
11367         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11368
11369         /* Bring device online, it will be no-op for non-fatal error resume */
11370         lpfc_online(phba);
11371 }
11372
11373 /**
11374  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
11375  * @phba: pointer to lpfc hba data structure.
11376  *
11377  * Returns the number of ELS/CT IOCBs to reserve.
11378  **/
11379 int
11380 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
11381 {
11382         int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
11383
11384         if (phba->sli_rev == LPFC_SLI_REV4) {
11385                 if (max_xri <= 100)
11386                         return 10;
11387                 else if (max_xri <= 256)
11388                         return 25;
11389                 else if (max_xri <= 512)
11390                         return 50;
11391                 else if (max_xri <= 1024)
11392                         return 100;
11393                 else if (max_xri <= 1536)
11394                         return 150;
11395                 else if (max_xri <= 2048)
11396                         return 200;
11397                 else
11398                         return 250;
11399         } else
11400                 return 0;
11401 }
11402
11403 /**
11404  * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
11405  * @phba: pointer to lpfc hba data structure.
11406  *
11407  * Returns the number of ELS/CT + NVMET IOCBs to reserve.
11408  **/
11409 int
11410 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
11411 {
11412         int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
11413
11414         if (phba->nvmet_support)
11415                 max_xri += LPFC_NVMET_BUF_POST;
11416         return max_xri;
11417 }
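
/*
 * Worked example (illustrative only): an adapter whose READ_CONFIG
 * reports max_xri = 900 lands in the "<= 1024" bucket above, so
 * lpfc_sli4_get_els_iocb_cnt() returns 100; with NVMET support enabled,
 * lpfc_sli4_get_iocb_cnt() adds LPFC_NVMET_BUF_POST on top of that.  A
 * hypothetical caller sizing the XRIs left over for I/O might compute:
 *
 *      reserved = lpfc_sli4_get_iocb_cnt(phba);
 *      io_xri = phba->sli4_hba.max_cfg_param.max_xri - reserved;
 *
 * ("reserved" and "io_xri" are made-up names, used only to make the
 * arithmetic concrete.)
 */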
11418
11419
11420 static void
11421 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
11422         uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
11423         const struct firmware *fw)
11424 {
11425         if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
11426             (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
11427              magic_number != MAGIC_NUMER_G6) ||
11428             (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
11429              magic_number != MAGIC_NUMER_G7))
11430                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11431                         "3030 This firmware version is not supported on "
11432                         "this HBA model. Device:%x Magic:%x Type:%x "
11433                         "ID:%x Size %d %zd\n",
11434                         phba->pcidev->device, magic_number, ftype, fid,
11435                         fsize, fw->size);
11436         else
11437                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11438                         "3022 FW Download failed. Device:%x Magic:%x Type:%x "
11439                         "ID:%x Size %d %zd\n",
11440                         phba->pcidev->device, magic_number, ftype, fid,
11441                         fsize, fw->size);
11442 }
11443
11444
11445 /**
11446  * lpfc_write_firmware - attempt to write a firmware image to the port
11447  * @fw: pointer to firmware image returned from request_firmware.
11448  * @context: pointer to lpfc hba data structure (passed as a void *).
11449  *
11450  **/
11451 static void
11452 lpfc_write_firmware(const struct firmware *fw, void *context)
11453 {
11454         struct lpfc_hba *phba = (struct lpfc_hba *)context;
11455         char fwrev[FW_REV_STR_SIZE];
11456         struct lpfc_grp_hdr *image;
11457         struct list_head dma_buffer_list;
11458         int i, rc = 0;
11459         struct lpfc_dmabuf *dmabuf, *next;
11460         uint32_t offset = 0, temp_offset = 0;
11461         uint32_t magic_number, ftype, fid, fsize;
11462
11463         /* The firmware pointer can be NULL in no-wait mode; sanity check it */
11464         if (!fw) {
11465                 rc = -ENXIO;
11466                 goto out;
11467         }
11468         image = (struct lpfc_grp_hdr *)fw->data;
11469
11470         magic_number = be32_to_cpu(image->magic_number);
11471         ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
11472         fid = bf_get_be32(lpfc_grp_hdr_id, image);
11473         fsize = be32_to_cpu(image->size);
11474
11475         INIT_LIST_HEAD(&dma_buffer_list);
11476         lpfc_decode_firmware_rev(phba, fwrev, 1);
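        /*
         * Only write the image when the revision string embedded in the
         * file differs from the one the port is currently running;
         * otherwise the update is skipped (message 3029 below).
         */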
11477         if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
11478                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11479                                 "3023 Updating Firmware, Current Version:%s "
11480                                 "New Version:%s\n",
11481                                 fwrev, image->revision);
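                /*
                 * Pre-allocate a fixed pool of LPFC_MBX_WR_CONFIG_MAX_BDE
                 * single-page DMA buffers; the image is streamed through
                 * this pool rather than mapped in one contiguous allocation.
                 */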
11482                 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
11483                         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
11484                                          GFP_KERNEL);
11485                         if (!dmabuf) {
11486                                 rc = -ENOMEM;
11487                                 goto release_out;
11488                         }
11489                         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
11490                                                           SLI4_PAGE_SIZE,
11491                                                           &dmabuf->phys,
11492                                                           GFP_KERNEL);
11493                         if (!dmabuf->virt) {
11494                                 kfree(dmabuf);
11495                                 rc = -ENOMEM;
11496                                 goto release_out;
11497                         }
11498                         list_add_tail(&dmabuf->list, &dma_buffer_list);
11499                 }
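                /*
                 * Walk the image in SLI4_PAGE_SIZE chunks: each pass fills
                 * the DMA pool and hands it to lpfc_wr_object(), which
                 * updates "offset" with how much of the image the port has
                 * accepted so far.
                 */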
11500                 while (offset < fw->size) {
11501                         temp_offset = offset;
11502                         list_for_each_entry(dmabuf, &dma_buffer_list, list) {
11503                                 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
11504                                         memcpy(dmabuf->virt,
11505                                                fw->data + temp_offset,
11506                                                fw->size - temp_offset);
11507                                         temp_offset = fw->size;
11508                                         break;
11509                                 }
11510                                 memcpy(dmabuf->virt, fw->data + temp_offset,
11511                                        SLI4_PAGE_SIZE);
11512                                 temp_offset += SLI4_PAGE_SIZE;
11513                         }
11514                         rc = lpfc_wr_object(phba, &dma_buffer_list,
11515                                     (fw->size - offset), &offset);
11516                         if (rc) {
11517                                 lpfc_log_write_firmware_error(phba, offset,
11518                                         magic_number, ftype, fid, fsize, fw);
11519                                 goto release_out;
11520                         }
11521                 }
11522                 rc = offset;
11523         } else
11524                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11525                                 "3029 Skipped Firmware update, Current "
11526                                 "Version:%s New Version:%s\n",
11527                                 fwrev, image->revision);
11528
11529 release_out:
11530         list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
11531                 list_del(&dmabuf->list);
11532                 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
11533                                   dmabuf->virt, dmabuf->phys);
11534                 kfree(dmabuf);
11535         }
11536         release_firmware(fw);
11537 out:
11538         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11539                         "3024 Firmware update done: %d.\n", rc);
11540         return;
11541 }
11542
11543 /**
11544  * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
11545  * @phba: pointer to lpfc hba data structure.
11546  * @fw_upgrade: upgrade path, either INT_FW_UPGRADE or RUN_FW_UPGRADE.
11547  *
11548  * Performs a Linux generic firmware upgrade on a device that supports it.
11549  **/
11550 int
11551 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
11552 {
11553         char file_name[ELX_MODEL_NAME_SIZE];
11554         int ret;
11555         const struct firmware *fw;
11556
11557         /* Only supported on SLI4 interface type 2 for now */
11558         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
11559             LPFC_SLI_INTF_IF_TYPE_2)
11560                 return -EPERM;
11561
11562         snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
11563
11564         if (fw_upgrade == INT_FW_UPGRADE) {
11565                 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
11566                                         file_name, &phba->pcidev->dev,
11567                                         GFP_KERNEL, (void *)phba,
11568                                         lpfc_write_firmware);
11569         } else if (fw_upgrade == RUN_FW_UPGRADE) {
11570                 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
11571                 if (!ret)
11572                         lpfc_write_firmware(fw, (void *)phba);
11573         } else {
11574                 ret = -EINVAL;
11575         }
11576
11577         return ret;
11578 }
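
/*
 * Illustrative sketch (not part of the driver): with a hypothetical
 * ModelName of "LPe32000", the snprintf() above asks the firmware loader
 * for "LPe32000.grp".  INT_FW_UPGRADE takes the asynchronous
 * request_firmware_nowait() path, with lpfc_write_firmware() invoked
 * later as the completion callback; RUN_FW_UPGRADE blocks in
 * request_firmware() and calls lpfc_write_firmware() directly.
 */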
11579
11580 /**
11581  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
11582  * @pdev: pointer to PCI device
11583  * @pid: pointer to PCI device identifier
11584  *
11585  * This routine is called from the kernel's PCI subsystem for a device with
11586  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface
11587  * spec is presented on the PCI bus, the kernel PCI subsystem looks at the
11588  * PCI device-specific information of the device and driver to see if the
11589  * driver states that it can support this kind of device. If so, the driver
11590  * core invokes this routine. If this routine determines it can claim the HBA,
11591  * it does all the initialization that it needs to do to handle the HBA
11592  * properly.
11593  *
11594  * Return code
11595  *      0 - driver can claim the device
11596  *      negative value - driver can not claim the device
11597  **/
11598 static int
11599 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11600 {
11601         struct lpfc_hba   *phba;
11602         struct lpfc_vport *vport = NULL;
11603         struct Scsi_Host  *shost = NULL;
11604         int error;
11605         uint32_t cfg_mode, intr_mode;
11606
11607         /* Allocate memory for HBA structure */
11608         phba = lpfc_hba_alloc(pdev);
11609         if (!phba)
11610                 return -ENOMEM;
11611
11612         /* Perform generic PCI device enabling operation */
11613         error = lpfc_enable_pci_dev(phba);
11614         if (error)
11615                 goto out_free_phba;
11616
11617         /* Set up SLI API function jump table for PCI-device group-1 HBAs */
11618         error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
11619         if (error)
11620                 goto out_disable_pci_dev;
11621
11622         /* Set up SLI-4 specific device PCI memory space */
11623         error = lpfc_sli4_pci_mem_setup(phba);
11624         if (error) {
11625                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11626                                 "1410 Failed to set up pci memory space.\n");
11627                 goto out_disable_pci_dev;
11628         }
11629
11630         /* Set up SLI-4 Specific device driver resources */
11631         error = lpfc_sli4_driver_resource_setup(phba);
11632         if (error) {
11633                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11634                                 "1412 Failed to set up driver resource.\n");
11635                 goto out_unset_pci_mem_s4;
11636         }
11637
11638         INIT_LIST_HEAD(&phba->active_rrq_list);
11639         INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
11640
11641         /* Set up common device driver resources */
11642         error = lpfc_setup_driver_resource_phase2(phba);
11643         if (error) {
11644                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11645                                 "1414 Failed to set up driver resource.\n");
11646                 goto out_unset_driver_resource_s4;
11647         }
11648
11649         /* Get the default values for Model Name and Description */
11650         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
11651
11652         /* Create SCSI host to the physical port */
11653         error = lpfc_create_shost(phba);
11654         if (error) {
11655                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11656                                 "1415 Failed to create scsi host.\n");
11657                 goto out_unset_driver_resource;
11658         }
11659
11660         /* Configure sysfs attributes */
11661         vport = phba->pport;
11662         error = lpfc_alloc_sysfs_attr(vport);
11663         if (error) {
11664                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11665                                 "1416 Failed to allocate sysfs attr\n");
11666                 goto out_destroy_shost;
11667         }
11668
11669         shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
11670         /* Now, trying to enable interrupt and bring up the device */
11671         cfg_mode = phba->cfg_use_msi;
11672
11673         /* Put device to a known state before enabling interrupt */
11674         lpfc_stop_port(phba);
11675
11676         /* Configure and enable interrupt */
11677         intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
11678         if (intr_mode == LPFC_INTR_ERROR) {
11679                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11680                                 "0426 Failed to enable interrupt.\n");
11681                 error = -ENODEV;
11682                 goto out_free_sysfs_attr;
11683         }
11684         /* Default to single EQ for non-MSI-X */
11685         if (phba->intr_type != MSIX) {
11686                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
11687                         phba->cfg_fcp_io_channel = 1;
11688                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11689                         phba->cfg_nvme_io_channel = 1;
11690                         if (phba->nvmet_support)
11691                                 phba->cfg_nvmet_mrq = 1;
11692                 }
11693                 phba->io_channel_irqs = 1;
11694         }
11695
11696         /* Set up SLI-4 HBA */
11697         if (lpfc_sli4_hba_setup(phba)) {
11698                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11699                                 "1421 Failed to set up hba\n");
11700                 error = -ENODEV;
11701                 goto out_disable_intr;
11702         }
11703
11704         /* Log the current active interrupt mode */
11705         phba->intr_mode = intr_mode;
11706         lpfc_log_intr_mode(phba, intr_mode);
11707
11708         /* Perform post initialization setup */
11709         lpfc_post_init_setup(phba);
11710
11711         /* NVME support in the FW, detected earlier in the driver load,
11712          * corrects the FC4 type, making a check for nvme_support unnecessary.
11713          */
11714         if ((phba->nvmet_support == 0) &&
11715             (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
11716                 /* Create NVME binding with nvme_fc_transport. This
11717                  * ensures the vport is initialized.  If the localport
11718                  * create fails, it should not unload the driver to
11719                  * support field issues.
11720                  */
11721                 error = lpfc_nvme_create_localport(vport);
11722                 if (error) {
11723                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11724                                         "6004 NVME registration failed, "
11725                                         "error x%x\n",
11726                                         error);
11727                 }
11728         }
11729
11730         /* check for firmware upgrade or downgrade */
11731         if (phba->cfg_request_firmware_upgrade)
11732                 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
11733
11734         /* Check if there are static vports to be created. */
11735         lpfc_create_static_vport(phba);
11736
11737         /* Enable RAS FW log support */
11738         lpfc_sli4_ras_setup(phba);
11739
11740         return 0;
11741
11742 out_disable_intr:
11743         lpfc_sli4_disable_intr(phba);
11744 out_free_sysfs_attr:
11745         lpfc_free_sysfs_attr(vport);
11746 out_destroy_shost:
11747         lpfc_destroy_shost(phba);
11748 out_unset_driver_resource:
11749         lpfc_unset_driver_resource_phase2(phba);
11750 out_unset_driver_resource_s4:
11751         lpfc_sli4_driver_resource_unset(phba);
11752 out_unset_pci_mem_s4:
11753         lpfc_sli4_pci_mem_unset(phba);
11754 out_disable_pci_dev:
11755         lpfc_disable_pci_dev(phba);
11756         if (shost)
11757                 scsi_host_put(shost);
11758 out_free_phba:
11759         lpfc_hba_free(phba);
11760         return error;
11761 }
11762
11763 /**
11764  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
11765  * @pdev: pointer to PCI device
11766  *
11767  * This routine is called from the kernel's PCI subsystem for a device with
11768  * the SLI-4 interface spec. When an Emulex HBA with the SLI-4 interface spec is
11769  * removed from PCI bus, it performs all the necessary cleanup for the HBA
11770  * device to be removed from the PCI subsystem properly.
11771  **/
11772 static void
11773 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
11774 {
11775         struct Scsi_Host *shost = pci_get_drvdata(pdev);
11776         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
11777         struct lpfc_vport **vports;
11778         struct lpfc_hba *phba = vport->phba;
11779         int i;
11780
11781         /* Mark the device unloading flag */
11782         spin_lock_irq(&phba->hbalock);
11783         vport->load_flag |= FC_UNLOADING;
11784         spin_unlock_irq(&phba->hbalock);
11785
11786         /* Free the HBA sysfs attributes */
11787         lpfc_free_sysfs_attr(vport);
11788
11789         /* Release all the vports against this physical port */
11790         vports = lpfc_create_vport_work_array(phba);
11791         if (vports != NULL)
11792                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
11793                         if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
11794                                 continue;
11795                         fc_vport_terminate(vports[i]->fc_vport);
11796                 }
11797         lpfc_destroy_vport_work_array(phba, vports);
11798
11799         /* Remove FC host and then SCSI host with the physical port */
11800         fc_remove_host(shost);
11801         scsi_remove_host(shost);
11802
11803         /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
11804          * localports are destroyed afterwards to clean up all transport memory.
11805          */
11806         lpfc_cleanup(vport);
11807         lpfc_nvmet_destroy_targetport(phba);
11808         lpfc_nvme_destroy_localport(vport);
11809
11810         /*
11811          * Bring down the SLI Layer. This step disables all interrupts,
11812          * clears the rings, discards all mailbox commands, and resets
11813          * the HBA FCoE function.
11814          */
11815         lpfc_debugfs_terminate(vport);
11816         lpfc_sli4_hba_unset(phba);
11817
11818         lpfc_stop_hba_timers(phba);
11819         spin_lock_irq(&phba->port_list_lock);
11820         list_del_init(&vport->listentry);
11821         spin_unlock_irq(&phba->port_list_lock);
11822
11823         /* Perform scsi free before driver resource_unset since scsi
11824          * buffers are released to their corresponding pools here.
11825          */
11826         lpfc_scsi_free(phba);
11827         lpfc_nvme_free(phba);
11828         lpfc_free_iocb_list(phba);
11829
11830         lpfc_unset_driver_resource_phase2(phba);
11831         lpfc_sli4_driver_resource_unset(phba);
11832
11833         /* Unmap adapter Control and Doorbell registers */
11834         lpfc_sli4_pci_mem_unset(phba);
11835
11836         /* Release PCI resources and disable device's PCI function */
11837         scsi_host_put(shost);
11838         lpfc_disable_pci_dev(phba);
11839
11840         /* Finally, free the driver's device data structure */
11841         lpfc_hba_free(phba);
11842
11843         return;
11844 }
11845
11846 /**
11847  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
11848  * @pdev: pointer to PCI device
11849  * @msg: power management message
11850  *
11851  * This routine is called from the kernel's PCI subsystem to support system
11852  * Power Management (PM) on a device with the SLI-4 interface spec. When PM
11853  * invokes this method, it quiesces the device by stopping the driver's
11854  * worker thread for the device, turning off the device's interrupt and DMA,
11855  * and bringing the device offline. Note that the driver implements only the
11856  * minimum PM requirements for a power-aware driver's suspend/resume support:
11857  * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the
11858  * suspend() method are treated as SUSPEND, the driver fully reinitializes
11859  * its device during the resume() method call, and the device is set to the
11860  * PCI_D3hot state in PCI config space instead of being set according to
11861  * the @msg provided by the PM.
11862  *
11863  * Return code
11864  *      0 - driver suspended the device
11865  *      Error otherwise
11866  **/
11867 static int
11868 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
11869 {
11870         struct Scsi_Host *shost = pci_get_drvdata(pdev);
11871         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11872
11873         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11874                         "2843 PCI device Power Management suspend.\n");
11875
11876         /* Bring down the device */
11877         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11878         lpfc_offline(phba);
11879         kthread_stop(phba->worker_thread);
11880
11881         /* Disable interrupt from device */
11882         lpfc_sli4_disable_intr(phba);
11883         lpfc_sli4_queue_destroy(phba);
11884
11885         /* Save device state to PCI config space */
11886         pci_save_state(pdev);
11887         pci_set_power_state(pdev, PCI_D3hot);
11888
11889         return 0;
11890 }
11891
11892 /**
11893  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
11894  * @pdev: pointer to PCI device
11895  *
11896  * This routine is called from the kernel's PCI subsystem to support system
11897  * Power Management (PM) on a device with the SLI-4 interface spec. When PM
11898  * invokes this method, it restores the device's PCI config space state and
11899  * fully reinitializes the device and brings it online. Note that the driver
11900  * implements only the minimum PM requirements for a power-aware driver's
11901  * suspend/resume support: all possible PM messages (SUSPEND, HIBERNATE,
11902  * FREEZE) passed to the suspend() method are treated as SUSPEND, the driver
11903  * fully reinitializes its device during the resume() method call, and the
11904  * device is set to PCI_D0 directly in PCI config space before restoring the
11905  * state.
11906  *
11907  * Return code
11908  *      0 - driver resumed the device
11909  *      Error otherwise
11910  **/
11911 static int
11912 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
11913 {
11914         struct Scsi_Host *shost = pci_get_drvdata(pdev);
11915         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11916         uint32_t intr_mode;
11917         int error;
11918
11919         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11920                         "0292 PCI device Power Management resume.\n");
11921
11922         /* Restore device state from PCI config space */
11923         pci_set_power_state(pdev, PCI_D0);
11924         pci_restore_state(pdev);
11925
11926         /*
11927          * pci_restore_state() now clears the device's saved_state flag, so
11928          * the restored state must be saved again.
11929          */
11930         pci_save_state(pdev);
11931
11932         if (pdev->is_busmaster)
11933                 pci_set_master(pdev);
11934
11935         /* Start up the kernel thread for this host adapter. */
11936         phba->worker_thread = kthread_run(lpfc_do_work, phba,
11937                                         "lpfc_worker_%d", phba->brd_no);
11938         if (IS_ERR(phba->worker_thread)) {
11939                 error = PTR_ERR(phba->worker_thread);
11940                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11941                                 "0293 PM resume failed to start worker "
11942                                 "thread: error=x%x.\n", error);
11943                 return error;
11944         }
11945
11946         /* Configure and enable interrupt */
11947         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
11948         if (intr_mode == LPFC_INTR_ERROR) {
11949                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11950                                 "0294 PM resume Failed to enable interrupt\n");
11951                 return -EIO;
11952         } else
11953                 phba->intr_mode = intr_mode;
11954
11955         /* Restart HBA and bring it online */
11956         lpfc_sli_brdrestart(phba);
11957         lpfc_online(phba);
11958
11959         /* Log the current active interrupt mode */
11960         lpfc_log_intr_mode(phba, phba->intr_mode);
11961
11962         return 0;
11963 }
11964
11965 /**
11966  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for PCI slot recovery
11967  * @phba: pointer to lpfc hba data structure.
11968  *
11969  * This routine is called to prepare the SLI4 device for PCI slot recovery. It
11970  * aborts all the outstanding SCSI I/Os to the PCI device.
11971  **/
11972 static void
11973 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
11974 {
11975         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11976                         "2828 PCI channel I/O abort preparing for recovery\n");
11977         /*
11978          * There may be errored I/Os through the HBA; abort all I/Os on the
11979          * txcmplq and let the SCSI mid-layer retry them to recover.
11980          */
11981         lpfc_sli_abort_fcp_rings(phba);
11982 }
11983
11984 /**
11985  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
11986  * @phba: pointer to lpfc hba data structure.
11987  *
11988  * This routine is called to prepare the SLI4 device for PCI slot reset. It
11989  * disables the device interrupt and PCI device, and aborts the pending
11990  * internal FCP I/Os.
11991  **/
11992 static void
11993 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
11994 {
11995         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11996                         "2826 PCI channel disable preparing for reset\n");
11997
11998         /* Block any management I/Os to the device */
11999         lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
12000
12001         /* Block all SCSI devices' I/Os on the host */
12002         lpfc_scsi_dev_block(phba);
12003
12004         /* Flush all driver's outstanding SCSI I/Os as we are about to reset */
12005         lpfc_sli_flush_fcp_rings(phba);
12006
12007         /* Flush the outstanding NVME IOs if fc4 type enabled. */
12008         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12009                 lpfc_sli_flush_nvme_rings(phba);
12010
12011         /* stop all timers */
12012         lpfc_stop_hba_timers(phba);
12013
12014         /* Disable interrupt and pci device */
12015         lpfc_sli4_disable_intr(phba);
12016         lpfc_sli4_queue_destroy(phba);
12017         pci_disable_device(phba->pcidev);
12018 }
12019
12020 /**
12021  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
12022  * @phba: pointer to lpfc hba data structure.
12023  *
12024  * This routine is called to prepare the SLI4 device for the PCI slot being
12025  * permanently disabled. It blocks SCSI transport layer traffic and flushes
12026  * the pending FCP I/Os.
12027  **/
12028 static void
12029 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
12030 {
12031         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12032                         "2827 PCI channel permanent disable for failure\n");
12033
12034         /* Block all SCSI devices' I/Os on the host */
12035         lpfc_scsi_dev_block(phba);
12036
12037         /* stop all timers */
12038         lpfc_stop_hba_timers(phba);
12039
12040         /* Clean up all driver's outstanding SCSI I/Os */
12041         lpfc_sli_flush_fcp_rings(phba);
12042
12043         /* Flush the outstanding NVME IOs if fc4 type enabled. */
12044         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12045                 lpfc_sli_flush_nvme_rings(phba);
12046 }
12047
12048 /**
12049  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
12050  * @pdev: pointer to PCI device.
12051  * @state: the current PCI connection state.
12052  *
12053  * This routine is called from the PCI subsystem for error handling on a device
12054  * with the SLI-4 interface spec. This function is called by the PCI subsystem
12055  * after a PCI bus error affecting this device has been detected. When this
12056  * function is invoked, it will need to stop all the I/Os and interrupt(s)
12057  * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
12058  * for the PCI subsystem to perform proper recovery as desired.
12059  *
12060  * Return codes
12061  *      PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
12062  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12063  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12063  **/
12064 static pci_ers_result_t
12065 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
12066 {
12067         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12068         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12069
12070         switch (state) {
12071         case pci_channel_io_normal:
12072                 /* Non-fatal error, prepare for recovery */
12073                 lpfc_sli4_prep_dev_for_recover(phba);
12074                 return PCI_ERS_RESULT_CAN_RECOVER;
12075         case pci_channel_io_frozen:
12076                 /* Fatal error, prepare for slot reset */
12077                 lpfc_sli4_prep_dev_for_reset(phba);
12078                 return PCI_ERS_RESULT_NEED_RESET;
12079         case pci_channel_io_perm_failure:
12080                 /* Permanent failure, prepare for device down */
12081                 lpfc_sli4_prep_dev_for_perm_failure(phba);
12082                 return PCI_ERS_RESULT_DISCONNECT;
12083         default:
12084                 /* Unknown state, prepare and request slot reset */
12085                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12086                                 "2825 Unknown PCI error state: x%x\n", state);
12087                 lpfc_sli4_prep_dev_for_reset(phba);
12088                 return PCI_ERS_RESULT_NEED_RESET;
12089         }
12090 }
12091
12092 /**
12093  * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
12094  * @pdev: pointer to PCI device.
12095  *
12096  * This routine is called from the PCI subsystem for error handling on a device
12097  * with the SLI-4 interface spec. It is called after the PCI bus has been reset to
12098  * restart the PCI card from scratch, as if from a cold-boot. During the
12099  * PCI subsystem error recovery, after the driver returns
12100  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
12101  * recovery and then call this routine before calling the .resume method to
12102  * recover the device. This function will initialize the HBA device and
12103  * enable interrupts, but it will just put the HBA into an offline state
12104  * without passing any I/O traffic.
12105  *
12106  * Return codes
12107  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
12108  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12109  **/
12110 static pci_ers_result_t
12111 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
12112 {
12113         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12114         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12115         struct lpfc_sli *psli = &phba->sli;
12116         uint32_t intr_mode;
12117
12118         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
12119         if (pci_enable_device_mem(pdev)) {
12120                 printk(KERN_ERR "lpfc: Cannot re-enable "
12121                         "PCI device after reset.\n");
12122                 return PCI_ERS_RESULT_DISCONNECT;
12123         }
12124
12125         pci_restore_state(pdev);
12126
12127         /*
12128          * pci_restore_state() now clears the device's saved_state flag, so
12129          * the restored state must be saved again.
12130          */
12131         pci_save_state(pdev);
12132
12133         if (pdev->is_busmaster)
12134                 pci_set_master(pdev);
12135
12136         spin_lock_irq(&phba->hbalock);
12137         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
12138         spin_unlock_irq(&phba->hbalock);
12139
12140         /* Configure and enable interrupt */
12141         intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
12142         if (intr_mode == LPFC_INTR_ERROR) {
12143                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12144                                 "2824 Cannot re-enable interrupt after "
12145                                 "slot reset.\n");
12146                 return PCI_ERS_RESULT_DISCONNECT;
12147         } else
12148                 phba->intr_mode = intr_mode;
12149
12150         /* Log the current active interrupt mode */
12151         lpfc_log_intr_mode(phba, phba->intr_mode);
12152
12153         return PCI_ERS_RESULT_RECOVERED;
12154 }
12155
12156 /**
12157  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
12158  * @pdev: pointer to PCI device
12159  *
12160  * This routine is called from the PCI subsystem for error handling on a device
12161  * with the SLI-4 interface spec. It is called when kernel error recovery tells
12162  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
12163  * error recovery. After this call, traffic can start to flow from this device
12164  * again.
12165  **/
12166 static void
12167 lpfc_io_resume_s4(struct pci_dev *pdev)
12168 {
12169         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12170         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12171
12172         /*
12173          * In case of slot reset, as the function reset is performed through
12174          * a mailbox command which needs DMA enabled, this operation has to
12175          * be deferred to the io resume phase. Taking the device offline
12176          * will perform the necessary cleanup.
12177          */
12178         if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
12179                 /* Perform device reset */
12180                 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
12181                 lpfc_offline(phba);
12182                 lpfc_sli_brdrestart(phba);
12183                 /* Bring the device back online */
12184                 lpfc_online(phba);
12185         }
12186 }
12187
12188 /**
12189  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
12190  * @pdev: pointer to PCI device
12191  * @pid: pointer to PCI device identifier
12192  *
12193  * This routine is to be registered to the kernel's PCI subsystem. When an
12194  * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
12195  * at PCI device-specific information of the device and driver to see if the
12196  * driver states that it can support this kind of device. If the match is
12197  * successful, the driver core invokes this routine. This routine dispatches
12198  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
12199  * do all the initialization that it needs to do to handle the HBA device
12200  * properly.
12201  *
12202  * Return code
12203  *      0 - driver can claim the device
12204  *      negative value - driver can not claim the device
12205  **/
12206 static int
12207 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
12208 {
12209         int rc;
12210         struct lpfc_sli_intf intf;
12211
12212         if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
12213                 return -ENODEV;
12214
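        /*
         * SLI-4 ports identify themselves with a valid LPFC_SLI_INTF
         * register carrying a SLI-4 revision; anything else is probed
         * as a legacy SLI-3 device.
         */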
12215         if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
12216             (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
12217                 rc = lpfc_pci_probe_one_s4(pdev, pid);
12218         else
12219                 rc = lpfc_pci_probe_one_s3(pdev, pid);
12220
12221         return rc;
12222 }
12223
12224 /**
12225  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
12226  * @pdev: pointer to PCI device
12227  *
12228  * This routine is to be registered to the kernel's PCI subsystem. When an
12229  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
12230  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
12231  * remove routine, which will perform all the necessary cleanup for the
12232  * device to be removed from the PCI subsystem properly.
12233  **/
12234 static void
12235 lpfc_pci_remove_one(struct pci_dev *pdev)
12236 {
12237         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12238         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12239
12240         switch (phba->pci_dev_grp) {
12241         case LPFC_PCI_DEV_LP:
12242                 lpfc_pci_remove_one_s3(pdev);
12243                 break;
12244         case LPFC_PCI_DEV_OC:
12245                 lpfc_pci_remove_one_s4(pdev);
12246                 break;
12247         default:
12248                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12249                                 "1424 Invalid PCI device group: 0x%x\n",
12250                                 phba->pci_dev_grp);
12251                 break;
12252         }
12253         return;
12254 }
12255
12256 /**
12257  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
12258  * @pdev: pointer to PCI device
12259  * @msg: power management message
12260  *
12261  * This routine is to be registered to the kernel's PCI subsystem to support
12262  * system Power Management (PM). When PM invokes this method, it dispatches
12263  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
12264  * suspend the device.
12265  *
12266  * Return code
12267  *      0 - driver suspended the device
12268  *      Error otherwise
12269  **/
12270 static int
12271 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
12272 {
12273         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12274         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12275         int rc = -ENODEV;
12276
12277         switch (phba->pci_dev_grp) {
12278         case LPFC_PCI_DEV_LP:
12279                 rc = lpfc_pci_suspend_one_s3(pdev, msg);
12280                 break;
12281         case LPFC_PCI_DEV_OC:
12282                 rc = lpfc_pci_suspend_one_s4(pdev, msg);
12283                 break;
12284         default:
12285                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12286                                 "1425 Invalid PCI device group: 0x%x\n",
12287                                 phba->pci_dev_grp);
12288                 break;
12289         }
12290         return rc;
12291 }
12292
12293 /**
12294  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
12295  * @pdev: pointer to PCI device
12296  *
12297  * This routine is to be registered to the kernel's PCI subsystem to support
12298  * system Power Management (PM). When PM invokes this method, it dispatches
12299  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
12300  * resume the device.
12301  *
12302  * Return code
12303  *      0 - driver resumed the device
12304  *      Error otherwise
12305  **/
12306 static int
12307 lpfc_pci_resume_one(struct pci_dev *pdev)
12308 {
12309         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12310         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12311         int rc = -ENODEV;
12312
12313         switch (phba->pci_dev_grp) {
12314         case LPFC_PCI_DEV_LP:
12315                 rc = lpfc_pci_resume_one_s3(pdev);
12316                 break;
12317         case LPFC_PCI_DEV_OC:
12318                 rc = lpfc_pci_resume_one_s4(pdev);
12319                 break;
12320         default:
12321                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12322                                 "1426 Invalid PCI device group: 0x%x\n",
12323                                 phba->pci_dev_grp);
12324                 break;
12325         }
12326         return rc;
12327 }
12328
12329 /**
12330  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
12331  * @pdev: pointer to PCI device.
12332  * @state: the current PCI connection state.
12333  *
12334  * This routine is registered to the PCI subsystem for error handling. This
12335  * function is called by the PCI subsystem after a PCI bus error affecting
12336  * this device has been detected. When this routine is invoked, it dispatches
12337  * the action to the proper SLI-3 or SLI-4 device error detected handling
12338  * routine, which will perform the proper error detected operation.
12339  *
12340  * Return codes
12341  *      PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
12342  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12343  **/
12344 static pci_ers_result_t
12345 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12346 {
12347         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12348         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12349         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
12350
12351         switch (phba->pci_dev_grp) {
12352         case LPFC_PCI_DEV_LP:
12353                 rc = lpfc_io_error_detected_s3(pdev, state);
12354                 break;
12355         case LPFC_PCI_DEV_OC:
12356                 rc = lpfc_io_error_detected_s4(pdev, state);
12357                 break;
12358         default:
12359                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12360                                 "1427 Invalid PCI device group: 0x%x\n",
12361                                 phba->pci_dev_grp);
12362                 break;
12363         }
12364         return rc;
12365 }
12366
12367 /**
12368  * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
12369  * @pdev: pointer to PCI device.
12370  *
12371  * This routine is registered to the PCI subsystem for error handling. This
12372  * function is called after PCI bus has been reset to restart the PCI card
12373  * from scratch, as if from a cold-boot. When this routine is invoked, it
12374  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
12375  * routine, which will perform the proper device reset.
12376  *
12377  * Return codes
12378  *      PCI_ERS_RESULT_RECOVERED - the device has been recovered
12379  *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
12380  **/
12381 static pci_ers_result_t
12382 lpfc_io_slot_reset(struct pci_dev *pdev)
12383 {
12384         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12385         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12386         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
12387
12388         switch (phba->pci_dev_grp) {
12389         case LPFC_PCI_DEV_LP:
12390                 rc = lpfc_io_slot_reset_s3(pdev);
12391                 break;
12392         case LPFC_PCI_DEV_OC:
12393                 rc = lpfc_io_slot_reset_s4(pdev);
12394                 break;
12395         default:
12396                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12397                                 "1428 Invalid PCI device group: 0x%x\n",
12398                                 phba->pci_dev_grp);
12399                 break;
12400         }
12401         return rc;
12402 }
12403
12404 /**
12405  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
12406  * @pdev: pointer to PCI device
12407  *
12408  * This routine is registered to the PCI subsystem for error handling. It
12409  * is called when kernel error recovery tells the lpfc driver that it is
12410  * OK to resume normal PCI operation after PCI bus error recovery. When
12411  * this routine is invoked, it dispatches the action to the proper SLI-3
12412  * or SLI-4 device io_resume routine, which will resume the device operation.
12413  **/
12414 static void
12415 lpfc_io_resume(struct pci_dev *pdev)
12416 {
12417         struct Scsi_Host *shost = pci_get_drvdata(pdev);
12418         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
12419
12420         switch (phba->pci_dev_grp) {
12421         case LPFC_PCI_DEV_LP:
12422                 lpfc_io_resume_s3(pdev);
12423                 break;
12424         case LPFC_PCI_DEV_OC:
12425                 lpfc_io_resume_s4(pdev);
12426                 break;
12427         default:
12428                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12429                                 "1429 Invalid PCI device group: 0x%x\n",
12430                                 phba->pci_dev_grp);
12431                 break;
12432         }
12433         return;
12434 }
12435
12436 /**
12437  * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
12438  * @phba: pointer to lpfc hba data structure.
12439  *
12440  * This routine checks to see if OAS is supported for this adapter. If
12441  * supported, the Flash Optimized Fabric (FOF) configuration flag is set.
12442  * Otherwise, the enable-OAS flag is cleared and the pool created for OAS
12443  * device data is destroyed.
12444  *
12445  **/
12446 void
12447 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
12448 {
12449
12450         if (!phba->cfg_EnableXLane)
12451                 return;
12452
12453         if (phba->sli4_hba.pc_sli4_params.oas_supported) {
12454                 phba->cfg_fof = 1;
12455         } else {
12456                 phba->cfg_fof = 0;
12457                 if (phba->device_data_mem_pool)
12458                         mempool_destroy(phba->device_data_mem_pool);
12459                 phba->device_data_mem_pool = NULL;
12460         }
12461
12462         return;
12463 }
12464
12465 /**
12466  * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
12467  * @phba: pointer to lpfc hba data structure.
12468  *
12469  * This routine checks to see if RAS is supported by the adapter, and whether
12470  * this particular PCI function is the one on which RAS is to be enabled.
12471  **/
12472 void
12473 lpfc_sli4_ras_init(struct lpfc_hba *phba)
12474 {
12475         switch (phba->pcidev->device) {
12476         case PCI_DEVICE_ID_LANCER_G6_FC:
12477         case PCI_DEVICE_ID_LANCER_G7_FC:
12478                 phba->ras_fwlog.ras_hwsupport = true;
12479                 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
12480                         phba->ras_fwlog.ras_enabled = true;
12481                 else
12482                         phba->ras_fwlog.ras_enabled = false;
12483                 break;
12484         default:
12485                 phba->ras_fwlog.ras_hwsupport = false;
12486         }
12487 }
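
/*
 * Example (illustrative): on a hypothetical two-function Lancer G6
 * adapter with cfg_ras_fwlog_func set to 0, both functions report
 * ras_hwsupport = true, but only PCI function 0 sets ras_enabled and
 * goes on to own the firmware log.
 */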
12488
12489 /**
12490  * lpfc_fof_queue_setup - Set up all the fof queues
12491  * @phba: pointer to lpfc hba data structure.
12492  *
12493  * This routine is invoked to set up all the fof queues for the FC HBA
12494  * operation.
12495  *
12496  * Return codes
12497  *      0 - successful
12498  *      -ENOMEM - No available memory
12499  **/
12500 int
12501 lpfc_fof_queue_setup(struct lpfc_hba *phba)
12502 {
12503         struct lpfc_sli_ring *pring;
12504         int rc;
12505
12506         rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
12507         if (rc)
12508                 return -ENOMEM;
12509
12510         if (phba->cfg_fof) {
12511
12512                 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
12513                                     phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
12514                 if (rc)
12515                         goto out_oas_cq;
12516
12517                 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
12518                                     phba->sli4_hba.oas_cq, LPFC_FCP);
12519                 if (rc)
12520                         goto out_oas_wq;
12521
12522                 /* Bind this CQ/WQ to the OAS ring */
12523                 pring = phba->sli4_hba.oas_wq->pring;
12524                 pring->sli.sli4.wqp =
12525                         (void *)phba->sli4_hba.oas_wq;
12526                 phba->sli4_hba.oas_cq->pring = pring;
12527         }
12528
12529         return 0;
12530
12531 out_oas_wq:
12532         lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
12533 out_oas_cq:
12534         lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
12535         return rc;
12536
12537 }
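
/*
 * Call-order sketch (for reference): lpfc_fof_queue_create() below
 * allocates the queue memory, lpfc_fof_queue_setup() above then posts
 * EQ -> CQ -> WQ to the port, and the error labels unwind in the
 * reverse of that creation order.
 */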
12538
12539 /**
12540  * lpfc_fof_queue_create - Create all the fof queues
12541  * @phba: pointer to lpfc hba data structure.
12542  *
12543  * This routine is invoked to allocate all the fof queues for the FC HBA
12544  * operation. For each SLI4 queue type, the parameters such as queue entry
12545  * count (queue depth) shall be taken from the module parameter. For now,
12546  * we just use some constant number as place holder.
12547  *
12548  * Return codes
12549  *      0 - successful
12550  *      -ENOMEM - No available memory
12551  *      -EIO - The mailbox failed to complete successfully.
12552  **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	uint32_t wqesize;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
				      phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	qdesc->qe_valid = 1;
	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_fof) {
		/* Create OAS CQ */
		if (phba->enab_exp_wqcq_pages)
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_EXPANDED_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      LPFC_CQE_EXP_COUNT);
		else
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.cq_esize,
						      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		qdesc->qe_valid = 1;
		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		if (phba->enab_exp_wqcq_pages) {
			/* 128-byte WQEs are needed when the FCP command is
			 * embedded in the WQE (fcp_embed_io).
			 */
			wqesize = (phba->fcp_embed_io) ?
				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_EXPANDED_PAGE_SIZE,
						      wqesize,
						      LPFC_WQE_EXP_COUNT);
		} else {
			qdesc = lpfc_sli4_queue_alloc(phba,
						      LPFC_DEFAULT_PAGE_SIZE,
						      phba->sli4_hba.wq_esize,
						      phba->sli4_hba.wq_ecount);
		}
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the fof queues created for the
 * FC HBA operation.
 *
 * Return codes
 *      0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}

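/*
 * Editor's sketch (not part of the original driver): a minimal, assumed
 * usage pattern showing how the three fof queue routines above pair up.
 * The wrapper name lpfc_fof_example_bringup is hypothetical; only
 * lpfc_fof_queue_create(), lpfc_fof_queue_setup() and
 * lpfc_fof_queue_destroy() are real symbols from this file.
 */
static int __maybe_unused
lpfc_fof_example_bringup(struct lpfc_hba *phba)
{
	int rc;

	/* Allocate host memory for the FOF EQ and, if enabled, OAS CQ/WQ */
	rc = lpfc_fof_queue_create(phba);
	if (rc)
		return rc;

	/* Post the allocated queues to the HBA */
	rc = lpfc_fof_queue_setup(phba);
	if (rc)
		/* Free the queue memory if the hardware setup failed */
		lpfc_fof_queue_destroy(phba);

	return rc;
}
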
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

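/*
 * Editor's note: these callbacks implement the standard PCI error
 * recovery (AER) flow. The PCI core calls .error_detected when an
 * uncorrectable error is reported, .slot_reset after the link has been
 * reset, and .resume once normal I/O may restart.
 */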
static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

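/*
 * Editor's note: .shutdown deliberately reuses lpfc_pci_remove_one so a
 * system shutdown quiesces the HBA through the same teardown path as a
 * hot remove.
 */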
static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

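/*
 * Editor's note: lpfc_mgmt_fop intentionally defines no file operations
 * beyond .owner. Opening /dev/lpfcmgmt therefore only takes a reference
 * on this module, keeping lpfc loaded while a management application
 * holds the device node open.
 */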
static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}
	lpfc_nvme_cmd_template();
	lpfc_nvmet_cmd_template();

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = num_present_cpus();

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

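/*
 * Editor's note: the vport_create/vport_delete hooks wired up in
 * lpfc_init() are invoked by the scsi_transport_fc layer when userspace
 * creates or deletes an NPIV virtual port, for example through the
 * fc_host vport_create/vport_delete sysfs attributes.
 */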
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
	idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);