/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver sets the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
		       sizeof(phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

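/*
 * Illustrative sketch (not part of the driver): the "licensed" key above is
 * converted to big-endian in place with cpu_to_be32() so the adapter sees the
 * same byte order regardless of host endianness. A standalone equivalent of
 * that loop, assuming a word-aligned 56-byte buffer:
 *
 *	uint32_t *p = (uint32_t *)licensed;
 *	size_t n;
 *
 *	for (n = 0; n < 56; n += sizeof(uint32_t), p++)
 *		*p = cpu_to_be32(*p);	// no-op on big-endian hosts
 */
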
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
static void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

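/*
 * Illustrative sketch (not part of the driver): cfg_soft_wwnn/cfg_soft_wwpn
 * carry user-supplied names as 64-bit integers, while the service parameters
 * store them as 8-byte arrays. u64_to_wwn() and wwn_to_u64() from
 * <scsi/scsi_transport_fc.h> convert between the two forms; the value below
 * is a hypothetical example:
 *
 *	u64 soft_wwnn = 0x20000000c9123456ULL;
 *	u8 wwn[8];
 *
 *	u64_to_wwn(soft_wwnn, wwn);		// wwn[0]=0x20 ... wwn[7]=0x56
 *	WARN_ON(wwn_to_u64(wwn) != soft_wwnn);	// round-trips exactly
 */
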
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

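	/*
	 * Worked example (illustrative): each WWNN byte yields two serial
	 * characters; nibbles 0-9 map to '0'-'9' (0x30 + j) and 10-15 map
	 * to 'a'-'f' (0x61 + j - 10). A hypothetical byte 0x4f therefore
	 * becomes the characters '4' and 'f', so the six IEEE bytes produce
	 * a 12-character serial number.
	 */
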
	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2599 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

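/*
 * Worked example (illustrative): phba->lmt is a bitmask of link speeds the
 * board supports, as reported by READ_CONFIG. If lmt = (LMT_4Gb | LMT_8Gb)
 * and the user sets lpfc_link_speed to LPFC_USER_LINK_SPEED_16G, the check
 * above finds LMT_16Gb clear, logs message 1302, and falls back to
 * LPFC_USER_LINK_SPEED_AUTO before issuing INIT_LINK.
 */
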
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2522 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

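/*
 * Illustrative sketch (not part of the driver): the cq_event member above is
 * embedded in both struct lpfc_iocbq and struct hbq_dmabuf, so container_of()
 * recovers the enclosing object from the queued event, e.g.:
 *
 *	struct lpfc_cq_event *evt;	// taken off sp_queue_event
 *	struct hbq_dmabuf *buf;
 *
 *	buf = container_of(evt, struct hbq_dmabuf, cq_event);
 *	// buf now points at the hbq_dmabuf that contains evt
 */
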
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);

	/* Cleanup preposted buffers on the ELS ring */
	pring = &psli->ring[LPFC_ELS_RING];
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->postbufq, &buflist);
	spin_unlock_irq(&phba->hbalock);

	count = 0;
	list_for_each_entry_safe(mp, next_mp, &buflist, list) {
		list_del(&mp->list);
		count++;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	spin_lock_irq(&phba->hbalock);
	pring->postbufq_cnt -= count;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_lock_irq(&pring->ring_lock);
		else
			spin_lock_irq(&phba->hbalock);
		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;

		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_unlock_irq(&pring->ring_lock);
		else
			spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	pring = &psli->ring[LPFC_ELS_RING];

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	spin_lock(&pring->ring_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&pring->ring_lock);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			 &aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	lpfc_sli4_free_sp_events(phba);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

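/*
 * Illustrative sketch (not part of the driver): lpfc_hba_down_post and the
 * other per-revision entry points are function pointers in struct lpfc_hba,
 * bound once per device according to the SLI revision; conceptually:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
 *	else
 *		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
 *
 * so callers such as lpfc_hba_down_post() never test the revision themselves.
 */
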
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

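/*
 * Illustrative sketch (not part of the driver): with the pre-4.15 kernel
 * timer API used here, the handler receives the lpfc_hba pointer cast to an
 * unsigned long, so registration elsewhere in the driver takes a form like:
 *
 *	setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
 *	mod_timer(&phba->hb_tmofunc,
 *		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 */
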
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
	    !(phba->link_state == LPFC_HBA_ERROR) &&
	    !(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_num_disc_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
		       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
		       jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 *
						   LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 *
						   LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
	    (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
			    (list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
						  LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
							     MBX_NOWAIT);

				if (retval != MBX_BUSY &&
				    retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
						     phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  msecs_to_jiffies(1000 *
						  LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
						  phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}

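/*
 * Timing sketch (illustrative): with the current defaults, a heartbeat
 * mailbox is issued after LPFC_HB_MBOX_INTERVAL (5) seconds of completion
 * inactivity, and once issued the timer is re-armed for
 * LPFC_HB_MBOX_TIMEOUT (30) seconds. If the completion has not cleared
 * hb_outstanding by then, the handler logs message 0459 and gives the
 * command one more timeout window rather than issuing another heartbeat.
 */
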
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error the iocbs (I/Os) on txcmplq and let
	 * the SCSI layer retry them after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the
	 * first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error the iocbs (I/Os) on txcmplq and let the SCSI layer
		 * retry them after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: flag controlling whether the recovery message is logged.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver needs to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
	lpfc_offline_prep(phba, mbx_action);
	lpfc_offline(phba);

	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli_brdrestart(phba);
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba,
						KERN_ERR, LOG_INIT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		lpfc_sli4_offline_eratt(phba);
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
				en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

1915 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1916 * @phba: pointer to lpfc hba data structure.
1917 * @vpd: pointer to the vital product data.
1918 * @len: length of the vital product data in bytes.
1920 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1921 * an array of characters. In this routine, the ModelName, ProgramType, and
1922 * ModelDesc, etc. fields of the phba data structure will be populated.
1925 * 0 - pointer to the VPD passed in is NULL
1929 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1931 uint8_t lenlo, lenhi;
1941 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1942 "0455 Vital Product Data: x%x x%x x%x x%x\n",
1943 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1945 while (!finished && (index < (len - 4))) {
1946 switch (vpd[index]) {
1954 i = ((((unsigned short)lenhi) << 8) + lenlo);
1963 Length = ((((unsigned short)lenhi) << 8) + lenlo);
1964 if (Length > len - index)
1965 Length = len - index;
1966 while (Length > 0) {
1967 /* Look for Serial Number */
1968 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1975 phba->SerialNumber[j++] = vpd[index++];
1979 phba->SerialNumber[j] = 0;
1982 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1983 phba->vpd_flag |= VPD_MODEL_DESC;
1990 phba->ModelDesc[j++] = vpd[index++];
1994 phba->ModelDesc[j] = 0;
1997 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1998 phba->vpd_flag |= VPD_MODEL_NAME;
2005 phba->ModelName[j++] = vpd[index++];
2009 phba->ModelName[j] = 0;
2012 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2013 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2020 phba->ProgramType[j++] = vpd[index++];
2024 phba->ProgramType[j] = 0;
2027 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2028 phba->vpd_flag |= VPD_PORT;
2035 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2036 (phba->sli4_hba.pport_name_sta ==
2037 LPFC_SLI4_PPNAME_GET)) {
2041 phba->Port[j++] = vpd[index++];
2045 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2046 (phba->sli4_hba.pport_name_sta ==
2047 LPFC_SLI4_PPNAME_NON))
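/*
 * For reference only (not driver logic): each "Vx" field parsed above
 * follows the PCI VPD keyword encoding of a two-character keyword, a
 * one-byte length, and then that many bytes of ASCII data. A hypothetical
 * fragment carrying a model name of "LPe12000" would look like:
 *
 *   'V' '2' 0x08 'L' 'P' 'e' '1' '2' '0' '0' '0'
 *
 * which the "V2" arm above copies into phba->ModelName.
 */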
2074 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2075 * @phba: pointer to lpfc hba data structure.
2076 * @mdp: pointer to the data structure to hold the derived model name.
2077 * @descp: pointer to the data structure to hold the derived description.
2079 * This routine retrieves HBA's description based on its registered PCI device
2080 * ID. The @descp passed into this function points to an array of 256 chars. It
2081 * shall be returned with the model name, maximum speed, and the host bus type.
2082 * The @mdp passed into this function points to an array of 80 chars. When the
2083 * function returns, the @mdp will be filled with the model name.
2086 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2089 uint16_t dev_id = phba->pcidev->device;
2092 int oneConnect = 0; /* default is not a oneConnect */
2097 } m = {"<Unknown>", "", ""};
2099 if (mdp && mdp[0] != '\0'
2100 && descp && descp[0] != '\0')
2103 if (phba->lmt & LMT_32Gb)
2105 else if (phba->lmt & LMT_16Gb)
2107 else if (phba->lmt & LMT_10Gb)
2109 else if (phba->lmt & LMT_8Gb)
2111 else if (phba->lmt & LMT_4Gb)
2113 else if (phba->lmt & LMT_2Gb)
2115 else if (phba->lmt & LMT_1Gb)
2123 case PCI_DEVICE_ID_FIREFLY:
2124 m = (typeof(m)){"LP6000", "PCI",
2125 "Obsolete, Unsupported Fibre Channel Adapter"};
2127 case PCI_DEVICE_ID_SUPERFLY:
2128 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2129 m = (typeof(m)){"LP7000", "PCI", ""};
2131 m = (typeof(m)){"LP7000E", "PCI", ""};
2132 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2134 case PCI_DEVICE_ID_DRAGONFLY:
2135 m = (typeof(m)){"LP8000", "PCI",
2136 "Obsolete, Unsupported Fibre Channel Adapter"};
2138 case PCI_DEVICE_ID_CENTAUR:
2139 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2140 m = (typeof(m)){"LP9002", "PCI", ""};
2142 m = (typeof(m)){"LP9000", "PCI", ""};
2143 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2145 case PCI_DEVICE_ID_RFLY:
2146 m = (typeof(m)){"LP952", "PCI",
2147 "Obsolete, Unsupported Fibre Channel Adapter"};
2149 case PCI_DEVICE_ID_PEGASUS:
2150 m = (typeof(m)){"LP9802", "PCI-X",
2151 "Obsolete, Unsupported Fibre Channel Adapter"};
2153 case PCI_DEVICE_ID_THOR:
2154 m = (typeof(m)){"LP10000", "PCI-X",
2155 "Obsolete, Unsupported Fibre Channel Adapter"};
2157 case PCI_DEVICE_ID_VIPER:
2158 m = (typeof(m)){"LPX1000", "PCI-X",
2159 "Obsolete, Unsupported Fibre Channel Adapter"};
2161 case PCI_DEVICE_ID_PFLY:
2162 m = (typeof(m)){"LP982", "PCI-X",
2163 "Obsolete, Unsupported Fibre Channel Adapter"};
2165 case PCI_DEVICE_ID_TFLY:
2166 m = (typeof(m)){"LP1050", "PCI-X",
2167 "Obsolete, Unsupported Fibre Channel Adapter"};
2169 case PCI_DEVICE_ID_HELIOS:
2170 m = (typeof(m)){"LP11000", "PCI-X2",
2171 "Obsolete, Unsupported Fibre Channel Adapter"};
2173 case PCI_DEVICE_ID_HELIOS_SCSP:
2174 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2175 "Obsolete, Unsupported Fibre Channel Adapter"};
2177 case PCI_DEVICE_ID_HELIOS_DCSP:
2178 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2179 "Obsolete, Unsupported Fibre Channel Adapter"};
2181 case PCI_DEVICE_ID_NEPTUNE:
2182 m = (typeof(m)){"LPe1000", "PCIe",
2183 "Obsolete, Unsupported Fibre Channel Adapter"};
2185 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2186 m = (typeof(m)){"LPe1000-SP", "PCIe",
2187 "Obsolete, Unsupported Fibre Channel Adapter"};
2189 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2190 m = (typeof(m)){"LPe1002-SP", "PCIe",
2191 "Obsolete, Unsupported Fibre Channel Adapter"};
2193 case PCI_DEVICE_ID_BMID:
2194 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2196 case PCI_DEVICE_ID_BSMB:
2197 m = (typeof(m)){"LP111", "PCI-X2",
2198 "Obsolete, Unsupported Fibre Channel Adapter"};
2200 case PCI_DEVICE_ID_ZEPHYR:
2201 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2203 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2204 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2206 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2207 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2210 case PCI_DEVICE_ID_ZMID:
2211 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2213 case PCI_DEVICE_ID_ZSMB:
2214 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2216 case PCI_DEVICE_ID_LP101:
2217 m = (typeof(m)){"LP101", "PCI-X",
2218 "Obsolete, Unsupported Fibre Channel Adapter"};
2220 case PCI_DEVICE_ID_LP10000S:
2221 m = (typeof(m)){"LP10000-S", "PCI",
2222 "Obsolete, Unsupported Fibre Channel Adapter"};
2224 case PCI_DEVICE_ID_LP11000S:
2225 m = (typeof(m)){"LP11000-S", "PCI-X2",
2226 "Obsolete, Unsupported Fibre Channel Adapter"};
2228 case PCI_DEVICE_ID_LPE11000S:
2229 m = (typeof(m)){"LPe11000-S", "PCIe",
2230 "Obsolete, Unsupported Fibre Channel Adapter"};
2232 case PCI_DEVICE_ID_SAT:
2233 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2235 case PCI_DEVICE_ID_SAT_MID:
2236 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2238 case PCI_DEVICE_ID_SAT_SMB:
2239 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2241 case PCI_DEVICE_ID_SAT_DCSP:
2242 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2244 case PCI_DEVICE_ID_SAT_SCSP:
2245 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2247 case PCI_DEVICE_ID_SAT_S:
2248 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2250 case PCI_DEVICE_ID_HORNET:
2251 m = (typeof(m)){"LP21000", "PCIe",
2252 "Obsolete, Unsupported FCoE Adapter"};
2255 case PCI_DEVICE_ID_PROTEUS_VF:
2256 m = (typeof(m)){"LPev12000", "PCIe IOV",
2257 "Obsolete, Unsupported Fibre Channel Adapter"};
2259 case PCI_DEVICE_ID_PROTEUS_PF:
2260 m = (typeof(m)){"LPev12000", "PCIe IOV",
2261 "Obsolete, Unsupported Fibre Channel Adapter"};
2263 case PCI_DEVICE_ID_PROTEUS_S:
2264 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2265 "Obsolete, Unsupported Fibre Channel Adapter"};
2267 case PCI_DEVICE_ID_TIGERSHARK:
2269 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2271 case PCI_DEVICE_ID_TOMCAT:
2273 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2275 case PCI_DEVICE_ID_FALCON:
2276 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2277 "EmulexSecure Fibre"};
2279 case PCI_DEVICE_ID_BALIUS:
2280 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2281 "Obsolete, Unsupported Fibre Channel Adapter"};
2283 case PCI_DEVICE_ID_LANCER_FC:
2284 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2286 case PCI_DEVICE_ID_LANCER_FC_VF:
2287 m = (typeof(m)){"LPe16000", "PCIe",
2288 "Obsolete, Unsupported Fibre Channel Adapter"};
2290 case PCI_DEVICE_ID_LANCER_FCOE:
2292 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2294 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2296 m = (typeof(m)){"OCe15100", "PCIe",
2297 "Obsolete, Unsupported FCoE"};
2299 case PCI_DEVICE_ID_LANCER_G6_FC:
2300 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2302 case PCI_DEVICE_ID_SKYHAWK:
2303 case PCI_DEVICE_ID_SKYHAWK_VF:
2305 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2308 m = (typeof(m)){"Unknown", "", ""};
2312 if (mdp && mdp[0] == '\0')
2313 snprintf(mdp, 79,"%s", m.name);
2315 * oneConnect HBAs require special processing; they are all initiators
2316 * and we put the port number on the end
2318 if (descp && descp[0] == '\0') {
2320 snprintf(descp, 255,
2321 "Emulex OneConnect %s, %s Initiator %s",
2324 else if (max_speed == 0)
2325 snprintf(descp, 255,
2327 m.name, m.bus, m.function);
2329 snprintf(descp, 255,
2330 "Emulex %s %d%s %s %s",
2331 m.name, max_speed, (GE) ? "GE" : "Gb",
2337 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
2338 * @phba: pointer to lpfc hba data structure.
2339 * @pring: pointer to an IOCB ring.
2340 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2342 * This routine posts a given number of IOCBs with the associated DMA buffer
2343 * descriptors specified by the cnt argument to the given IOCB ring.
2346 * The number of IOCBs NOT able to be posted to the IOCB ring.
2349 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2352 struct lpfc_iocbq *iocb;
2353 struct lpfc_dmabuf *mp1, *mp2;
2355 cnt += pring->missbufcnt;
2357 /* While there are buffers to post */
2359 /* Allocate buffer for command iocb */
2360 iocb = lpfc_sli_get_iocbq(phba);
2362 pring->missbufcnt = cnt;
2367 /* 2 buffers can be posted per command */
2368 /* Allocate buffer to post */
2369 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2371 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2372 if (!mp1 || !mp1->virt) {
2374 lpfc_sli_release_iocbq(phba, iocb);
2375 pring->missbufcnt = cnt;
2379 INIT_LIST_HEAD(&mp1->list);
2380 /* Allocate buffer to post */
2382 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2384 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2386 if (!mp2 || !mp2->virt) {
2388 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2390 lpfc_sli_release_iocbq(phba, iocb);
2391 pring->missbufcnt = cnt;
2395 INIT_LIST_HEAD(&mp2->list);
2400 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2401 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2402 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2403 icmd->ulpBdeCount = 1;
2406 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2407 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2408 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2410 icmd->ulpBdeCount = 2;
2413 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2416 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2418 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2422 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2426 lpfc_sli_release_iocbq(phba, iocb);
2427 pring->missbufcnt = cnt;
2430 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2432 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2434 pring->missbufcnt = 0;
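/*
 * Note: each CMD_QUE_RING_BUF64_CN IOCB built above carries up to two
 * 64-bit BDEs of FCELSSIZE bytes each, so posting @cnt buffers normally
 * issues roughly cnt/2 IOCBs; buffers that could not be posted are
 * remembered in pring->missbufcnt and retried on the next call.
 */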
2439 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2440 * @phba: pointer to lpfc hba data structure.
2442 * This routine posts the initial receive IOCB buffers to the ELS ring. The
2443 * number of initial IOCB buffers to post is specified by LPFC_BUF_RING0.
2447 * 0 - success (currently always success)
2450 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2452 struct lpfc_sli *psli = &phba->sli;
2454 /* Ring 0, ELS / CT buffers */
2455 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2456 /* Ring 2 - FCP no buffers needed */
2461 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
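/*
 * S(N, V) is a 32-bit rotate-left of V by N bits, the primitive used by
 * the SHA-1 style hashing below; e.g. S(5, A) in lpfc_sha_iterate()
 * rotates the working variable A left by five bit positions.
 */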
2464 * lpfc_sha_init - Set up initial array of hash table entries
2465 * @HashResultPointer: pointer to an array as hash table.
2467 * This routine loads the initial values into the array of hash table entries
2471 lpfc_sha_init(uint32_t * HashResultPointer)
2473 HashResultPointer[0] = 0x67452301;
2474 HashResultPointer[1] = 0xEFCDAB89;
2475 HashResultPointer[2] = 0x98BADCFE;
2476 HashResultPointer[3] = 0x10325476;
2477 HashResultPointer[4] = 0xC3D2E1F0;
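/* These five constants are the standard SHA-1 initial chaining values
 * (H0..H4 in FIPS 180), so the challenge hash computed below is a single
 * SHA-1 style compression rather than a driver-specific digest.
 */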
2481 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2482 * @HashResultPointer: pointer to an initial/result hash table.
2483 * @HashWorkingPointer: pointer to an working hash table.
2485 * This routine iterates the initial hash table pointed to by @HashResultPointer
2486 * with the values from the working hash table pointed to by @HashWorkingPointer.
2487 * The results are put back into the initial hash table, returned through
2488 * @HashResultPointer as the result hash table.
2491 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2495 uint32_t A, B, C, D, E;
2498 HashWorkingPointer[t] =
2499 S(1,
2500 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
2502 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2503 } while (++t <= 79);
2505 A = HashResultPointer[0];
2506 B = HashResultPointer[1];
2507 C = HashResultPointer[2];
2508 D = HashResultPointer[3];
2509 E = HashResultPointer[4];
2513 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2514 } else if (t < 40) {
2515 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2516 } else if (t < 60) {
2517 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2519 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2521 TEMP += S(5, A) + E + HashWorkingPointer[t];
2527 } while (++t <= 79);
2529 HashResultPointer[0] += A;
2530 HashResultPointer[1] += B;
2531 HashResultPointer[2] += C;
2532 HashResultPointer[3] += D;
2533 HashResultPointer[4] += E;
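/*
 * Note: the two do/while loops above are one SHA-1 compression: the
 * first expands the 16 input words into an 80-entry message schedule,
 * the second runs the 80 rounds with the four round functions and
 * constants, and the final additions fold the result back into the
 * chaining values.
 */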
2538 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2539 * @RandomChallenge: pointer to the entry of host challenge random number array.
2540 * @HashWorking: pointer to the entry of the working hash array.
2542 * This routine calculates the working hash array referred to by @HashWorking
2543 * from the challenge random numbers associated with the host, referred to by
2544 * @RandomChallenge. The result is put into the entry of the working hash
2545 * array and returned by reference through @HashWorking.
2548 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2550 *HashWorking = (*RandomChallenge ^ *HashWorking);
2554 * lpfc_hba_init - Perform special handling for LC HBA initialization
2555 * @phba: pointer to lpfc hba data structure.
2556 * @hbainit: pointer to an array of unsigned 32-bit integers.
2558 * This routine performs the special handling for LC HBA initialization.
2561 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2564 uint32_t *HashWorking;
2565 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2567 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2571 HashWorking[0] = HashWorking[78] = *pwwnn++;
2572 HashWorking[1] = HashWorking[79] = *pwwnn;
2574 for (t = 0; t < 7; t++)
2575 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2577 lpfc_sha_init(hbainit);
2578 lpfc_sha_iterate(hbainit, HashWorking);
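/*
 * Taken together, the helpers above implement the LC challenge/response
 * handshake: the WWNN seeds words 0/1 and 78/79 of the working array,
 * the adapter's random challenge is XOR-folded in via
 * lpfc_challenge_key(), and one SHA-1 style compression
 * (lpfc_sha_init() plus lpfc_sha_iterate()) produces the five-word
 * response returned through @hbainit.
 */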
2583 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2584 * @vport: pointer to a virtual N_Port data structure.
2586 * This routine performs the necessary cleanups before deleting the @vport.
2587 * It invokes the discovery state machine to perform necessary state
2588 * transitions and to release the ndlps associated with the @vport. Note,
2589 * the physical port is treated as @vport 0.
2592 lpfc_cleanup(struct lpfc_vport *vport)
2594 struct lpfc_hba *phba = vport->phba;
2595 struct lpfc_nodelist *ndlp, *next_ndlp;
2598 if (phba->link_state > LPFC_LINK_DOWN)
2599 lpfc_port_link_failure(vport);
2601 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2602 if (!NLP_CHK_NODE_ACT(ndlp)) {
2603 ndlp = lpfc_enable_node(vport, ndlp,
2604 NLP_STE_UNUSED_NODE);
2607 spin_lock_irq(&phba->ndlp_lock);
2608 NLP_SET_FREE_REQ(ndlp);
2609 spin_unlock_irq(&phba->ndlp_lock);
2610 /* Trigger the release of the ndlp memory */
2614 spin_lock_irq(&phba->ndlp_lock);
2615 if (NLP_CHK_FREE_REQ(ndlp)) {
2616 /* The ndlp should not be in memory free mode already */
2617 spin_unlock_irq(&phba->ndlp_lock);
2620 /* Indicate request for freeing ndlp memory */
2621 NLP_SET_FREE_REQ(ndlp);
2622 spin_unlock_irq(&phba->ndlp_lock);
2624 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2625 ndlp->nlp_DID == Fabric_DID) {
2626 /* Just free up ndlp with Fabric_DID for vports */
2631 /* Take care of nodes in the unused state before the state
2632 * machine takes action.
2634 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2639 if (ndlp->nlp_type & NLP_FABRIC)
2640 lpfc_disc_state_machine(vport, ndlp, NULL,
2641 NLP_EVT_DEVICE_RECOVERY);
2643 lpfc_disc_state_machine(vport, ndlp, NULL,
2647 /* At this point, ALL ndlp's should be gone
2648 * because of the previous NLP_EVT_DEVICE_RM.
2649 * Let's wait for this to happen, if needed.
2651 while (!list_empty(&vport->fc_nodes)) {
2653 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2654 "0233 Nodelist not empty\n");
2655 list_for_each_entry_safe(ndlp, next_ndlp,
2656 &vport->fc_nodes, nlp_listp) {
2657 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2659 "0282 did:x%x ndlp:x%p "
2660 "usgmap:x%x refcnt:%d\n",
2661 ndlp->nlp_DID, (void *)ndlp,
2663 kref_read(&ndlp->kref));
2668 /* Wait for any activity on ndlps to settle */
2671 lpfc_cleanup_vports_rrqs(vport, NULL);
2675 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2676 * @vport: pointer to a virtual N_Port data structure.
2678 * This routine stops all the timers associated with a @vport. This function
2679 * is invoked before disabling or deleting a @vport. Note that the physical
2680 * port is treated as @vport 0.
2683 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2685 del_timer_sync(&vport->els_tmofunc);
2686 del_timer_sync(&vport->delayed_disc_tmo);
2687 lpfc_can_disctmo(vport);
2692 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2693 * @phba: pointer to lpfc hba data structure.
2695 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2696 * caller of this routine should already hold the host lock.
2699 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2701 /* Clear pending FCF rediscovery wait flag */
2702 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2704 /* Now, try to stop the timer */
2705 del_timer(&phba->fcf.redisc_wait);
2709 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2710 * @phba: pointer to lpfc hba data structure.
2712 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2713 * checks whether the FCF rediscovery wait timer is pending with the host
2714 * lock held before proceeding with disabling the timer and clearing the
2715 * wait timer pending flag.
2718 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2720 spin_lock_irq(&phba->hbalock);
2721 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2722 /* FCF rediscovery timer already fired or stopped */
2723 spin_unlock_irq(&phba->hbalock);
2726 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2727 /* Clear failover in progress flags */
2728 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2729 spin_unlock_irq(&phba->hbalock);
2733 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2734 * @phba: pointer to lpfc hba data structure.
2736 * This routine stops all the timers associated with an HBA. This function is
2737 * invoked before either putting an HBA offline or unloading the driver.
2740 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2742 lpfc_stop_vport_timers(phba->pport);
2743 del_timer_sync(&phba->sli.mbox_tmo);
2744 del_timer_sync(&phba->fabric_block_timer);
2745 del_timer_sync(&phba->eratt_poll);
2746 del_timer_sync(&phba->hb_tmofunc);
2747 if (phba->sli_rev == LPFC_SLI_REV4) {
2748 del_timer_sync(&phba->rrq_tmr);
2749 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2751 phba->hb_outstanding = 0;
2753 switch (phba->pci_dev_grp) {
2754 case LPFC_PCI_DEV_LP:
2755 /* Stop any LightPulse device specific driver timers */
2756 del_timer_sync(&phba->fcp_poll_timer);
2758 case LPFC_PCI_DEV_OC:
2759 /* Stop any OneConnect device-specific driver timers */
2760 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2764 "0297 Invalid device group (x%x)\n",
2772 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2773 * @phba: pointer to lpfc hba data structure.
2775 * This routine marks an HBA's management interface as blocked. Once the HBA's
2776 * management interface is marked as blocked, all user space access to
2777 * the HBA, whether from the sysfs interface or the libdfc interface, will
2778 * be blocked. The HBA is set to block the management interface when the
2779 * driver prepares the HBA interface for online or offline.
2782 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2784 unsigned long iflag;
2785 uint8_t actcmd = MBX_HEARTBEAT;
2786 unsigned long timeout;
2788 spin_lock_irqsave(&phba->hbalock, iflag);
2789 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 if (mbx_action == LPFC_MBX_NO_WAIT)
2793 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2794 spin_lock_irqsave(&phba->hbalock, iflag);
2795 if (phba->sli.mbox_active) {
2796 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2797 /* Determine how long we might wait for the active mailbox
2798 * command to be gracefully completed by firmware.
2800 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2801 phba->sli.mbox_active) * 1000) + jiffies;
2803 spin_unlock_irqrestore(&phba->hbalock, iflag);
2805 /* Wait for the outstanding mailbox command to complete */
2806 while (phba->sli.mbox_active) {
2807 /* Check active mailbox complete status every 2ms */
2809 if (time_after(jiffies, timeout)) {
2810 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2811 "2813 Mgmt IO is Blocked %x "
2812 "- mbox cmd %x still active\n",
2813 phba->sli.sli_flag, actcmd);
2820 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2821 * @phba: pointer to lpfc hba data structure.
2823 * Allocate RPIs for all active remote nodes. This is needed whenever
2824 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2825 * is to fix up the temporary RPI assignments.
2828 lpfc_sli4_node_prep(struct lpfc_hba *phba)
2830 struct lpfc_nodelist *ndlp, *next_ndlp;
2831 struct lpfc_vport **vports;
2834 if (phba->sli_rev != LPFC_SLI_REV4)
2837 vports = lpfc_create_vport_work_array(phba);
2838 if (vports != NULL) {
2839 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2840 if (vports[i]->load_flag & FC_UNLOADING)
2843 list_for_each_entry_safe(ndlp, next_ndlp,
2844 &vports[i]->fc_nodes,
2846 if (NLP_CHK_NODE_ACT(ndlp)) {
2848 lpfc_sli4_alloc_rpi(phba);
2849 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2851 "0009 rpi:%x DID:%x "
2852 "flg:%x map:%x %p\n",
2862 lpfc_destroy_vport_work_array(phba, vports);
2866 * lpfc_online - Initialize and bring an HBA online
2867 * @phba: pointer to lpfc hba data structure.
2869 * This routine initializes the HBA and brings it online. During this
2870 * process, the management interface is blocked to prevent user space access
2871 * to the HBA interfering with the driver initialization.
2878 lpfc_online(struct lpfc_hba *phba)
2880 struct lpfc_vport *vport;
2881 struct lpfc_vport **vports;
2883 bool vpis_cleared = false;
2887 vport = phba->pport;
2889 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2892 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2893 "0458 Bring Adapter online\n");
2895 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
2897 if (!lpfc_sli_queue_setup(phba)) {
2898 lpfc_unblock_mgmt_io(phba);
2902 if (phba->sli_rev == LPFC_SLI_REV4) {
2903 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2904 lpfc_unblock_mgmt_io(phba);
2907 spin_lock_irq(&phba->hbalock);
2908 if (!phba->sli4_hba.max_cfg_param.vpi_used)
2909 vpis_cleared = true;
2910 spin_unlock_irq(&phba->hbalock);
2912 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2913 lpfc_unblock_mgmt_io(phba);
2918 vports = lpfc_create_vport_work_array(phba);
2919 if (vports != NULL) {
2920 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2921 struct Scsi_Host *shost;
2922 shost = lpfc_shost_from_vport(vports[i]);
2923 spin_lock_irq(shost->host_lock);
2924 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2925 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2926 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2927 if (phba->sli_rev == LPFC_SLI_REV4) {
2928 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2929 if ((vpis_cleared) &&
2930 (vports[i]->port_type !=
2931 LPFC_PHYSICAL_PORT))
2934 spin_unlock_irq(shost->host_lock);
2937 lpfc_destroy_vport_work_array(phba, vports);
2939 lpfc_unblock_mgmt_io(phba);
2944 * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
2945 * @phba: pointer to lpfc hba data structure.
2947 * This routine marks an HBA's management interface as not blocked. Once the
2948 * HBA's management interface is marked as not blocked, all user space
2949 * access to the HBA, whether from the sysfs interface or the libdfc
2950 * interface, will be allowed. The HBA is set to block the management interface
2951 * when the driver prepares the HBA interface for online or offline and then
2952 * set to unblock the management interface afterwards.
2955 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2957 unsigned long iflag;
2959 spin_lock_irqsave(&phba->hbalock, iflag);
2960 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2961 spin_unlock_irqrestore(&phba->hbalock, iflag);
2965 * lpfc_offline_prep - Prepare an HBA to be brought offline
2966 * @phba: pointer to lpfc hba data structure.
2968 * This routine is invoked to prepare an HBA to be brought offline. It
2969 * unregisters the logins for all nodes on all vports and flushes the mailbox
2970 * queue to make it ready to be brought offline.
2973 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
2975 struct lpfc_vport *vport = phba->pport;
2976 struct lpfc_nodelist *ndlp, *next_ndlp;
2977 struct lpfc_vport **vports;
2978 struct Scsi_Host *shost;
2981 if (vport->fc_flag & FC_OFFLINE_MODE)
2984 lpfc_block_mgmt_io(phba, mbx_action);
2986 lpfc_linkdown(phba);
2988 /* Issue an unreg_login to all nodes on all vports */
2989 vports = lpfc_create_vport_work_array(phba);
2990 if (vports != NULL) {
2991 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2992 if (vports[i]->load_flag & FC_UNLOADING)
2994 shost = lpfc_shost_from_vport(vports[i]);
2995 spin_lock_irq(shost->host_lock);
2996 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2997 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2998 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2999 spin_unlock_irq(shost->host_lock);
3001 shost = lpfc_shost_from_vport(vports[i]);
3002 list_for_each_entry_safe(ndlp, next_ndlp,
3003 &vports[i]->fc_nodes,
3005 if (!NLP_CHK_NODE_ACT(ndlp))
3007 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3009 if (ndlp->nlp_type & NLP_FABRIC) {
3010 lpfc_disc_state_machine(vports[i], ndlp,
3011 NULL, NLP_EVT_DEVICE_RECOVERY);
3012 lpfc_disc_state_machine(vports[i], ndlp,
3013 NULL, NLP_EVT_DEVICE_RM);
3015 spin_lock_irq(shost->host_lock);
3016 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3017 spin_unlock_irq(shost->host_lock);
3019 * Whenever an SLI4 port goes offline, free the
3020 * RPI. Get a new RPI when the adapter port
3021 * comes back online.
3023 if (phba->sli_rev == LPFC_SLI_REV4) {
3024 lpfc_printf_vlog(ndlp->vport,
3025 KERN_INFO, LOG_NODE,
3026 "0011 lpfc_offline: "
3028 "usgmap:x%x rpi:%x\n",
3029 ndlp, ndlp->nlp_DID,
3033 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3035 lpfc_unreg_rpi(vports[i], ndlp);
3039 lpfc_destroy_vport_work_array(phba, vports);
3041 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3045 * lpfc_offline - Bring an HBA offline
3046 * @phba: pointer to lpfc hba data structure.
3048 * This routine actually brings an HBA offline. It stops all the timers
3049 * associated with the HBA, brings down the SLI layer, and eventually
3050 * marks the HBA as in offline state for the upper layer protocol.
3053 lpfc_offline(struct lpfc_hba *phba)
3055 struct Scsi_Host *shost;
3056 struct lpfc_vport **vports;
3059 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3062 /* stop port and all timers associated with this hba */
3063 lpfc_stop_port(phba);
3064 vports = lpfc_create_vport_work_array(phba);
3066 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3067 lpfc_stop_vport_timers(vports[i]);
3068 lpfc_destroy_vport_work_array(phba, vports);
3069 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3070 "0460 Bring Adapter offline\n");
3071 /* Bring down the SLI Layer and cleanup. The HBA is offline now
3073 lpfc_sli_hba_down(phba);
3074 spin_lock_irq(&phba->hbalock);
3076 spin_unlock_irq(&phba->hbalock);
3077 vports = lpfc_create_vport_work_array(phba);
3079 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3080 shost = lpfc_shost_from_vport(vports[i]);
3081 spin_lock_irq(shost->host_lock);
3082 vports[i]->work_port_events = 0;
3083 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3084 spin_unlock_irq(shost->host_lock);
3086 lpfc_destroy_vport_work_array(phba, vports);
3090 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3091 * @phba: pointer to lpfc hba data structure.
3093 * This routine is to free all the SCSI buffers and IOCBs from the driver
3094 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3095 * the internal resources before the device is removed from the system.
3098 lpfc_scsi_free(struct lpfc_hba *phba)
3100 struct lpfc_scsi_buf *sb, *sb_next;
3101 struct lpfc_iocbq *io, *io_next;
3103 spin_lock_irq(&phba->hbalock);
3105 /* Release all the lpfc_scsi_bufs maintained by this host. */
3107 spin_lock(&phba->scsi_buf_list_put_lock);
3108 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3110 list_del(&sb->list);
3111 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3114 phba->total_scsi_bufs--;
3116 spin_unlock(&phba->scsi_buf_list_put_lock);
3118 spin_lock(&phba->scsi_buf_list_get_lock);
3119 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3121 list_del(&sb->list);
3122 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3125 phba->total_scsi_bufs--;
3127 spin_unlock(&phba->scsi_buf_list_get_lock);
3129 /* Release all the lpfc_iocbq entries maintained by this host. */
3130 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
3131 list_del(&io->list);
3133 phba->total_iocbq_bufs--;
3136 spin_unlock_irq(&phba->hbalock);
3140 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
3141 * @phba: pointer to lpfc hba data structure.
3143 * This routine first calculates the sizes of the current els and allocated
3144 * scsi sgl lists, and then goes through all sgls to update the physical
3145 * XRIs assigned due to port function reset. During port initialization, the
3146 * current els and allocated scsi sgl lists are empty.
3149 * 0 - successful (for now, it always returns 0)
3152 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3154 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3155 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
3156 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
3157 LIST_HEAD(els_sgl_list);
3158 LIST_HEAD(scsi_sgl_list);
3160 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3163 * update on pci function's els xri-sgl list
3165 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3166 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3167 /* els xri-sgl expanded */
3168 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3169 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3170 "3157 ELS xri-sgl count increased from "
3171 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3173 /* allocate the additional els sgls */
3174 for (i = 0; i < xri_cnt; i++) {
3175 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3177 if (sglq_entry == NULL) {
3178 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3179 "2562 Failure to allocate an "
3180 "ELS sgl entry:%d\n", i);
3184 sglq_entry->buff_type = GEN_BUFF_TYPE;
3185 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3187 if (sglq_entry->virt == NULL) {
3189 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3190 "2563 Failure to allocate an "
3191 "ELS mbuf:%d\n", i);
3195 sglq_entry->sgl = sglq_entry->virt;
3196 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3197 sglq_entry->state = SGL_FREED;
3198 list_add_tail(&sglq_entry->list, &els_sgl_list);
3200 spin_lock_irq(&phba->hbalock);
3201 spin_lock(&pring->ring_lock);
3202 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3203 spin_unlock(&pring->ring_lock);
3204 spin_unlock_irq(&phba->hbalock);
3205 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3206 /* els xri-sgl shrunk */
3207 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3208 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3209 "3158 ELS xri-sgl count decreased from "
3210 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3212 spin_lock_irq(&phba->hbalock);
3213 spin_lock(&pring->ring_lock);
3214 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
3215 spin_unlock(&pring->ring_lock);
3216 spin_unlock_irq(&phba->hbalock);
3217 /* release extra els sgls from list */
3218 for (i = 0; i < xri_cnt; i++) {
3219 list_remove_head(&els_sgl_list,
3220 sglq_entry, struct lpfc_sglq, list);
3222 lpfc_mbuf_free(phba, sglq_entry->virt,
3227 spin_lock_irq(&phba->hbalock);
3228 spin_lock(&pring->ring_lock);
3229 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3230 spin_unlock(&pring->ring_lock);
3231 spin_unlock_irq(&phba->hbalock);
3233 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3234 "3163 ELS xri-sgl count unchanged: %d\n",
3236 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3238 /* update xris to els sgls on the list */
3240 sglq_entry_next = NULL;
3241 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3242 &phba->sli4_hba.lpfc_sgl_list, list) {
3243 lxri = lpfc_sli4_next_xritag(phba);
3244 if (lxri == NO_XRI) {
3245 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3246 "2400 Failed to allocate xri for "
3251 sglq_entry->sli4_lxritag = lxri;
3252 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3256 * update on pci function's allocated scsi xri-sgl list
3258 phba->total_scsi_bufs = 0;
3260 /* maximum number of xris available for scsi buffers */
3261 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3264 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3265 "2401 Current allocated SCSI xri-sgl count:%d, "
3266 "maximum SCSI xri count:%d\n",
3267 phba->sli4_hba.scsi_xri_cnt,
3268 phba->sli4_hba.scsi_xri_max);
3270 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3271 spin_lock(&phba->scsi_buf_list_put_lock);
3272 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3273 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3274 spin_unlock(&phba->scsi_buf_list_put_lock);
3275 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3277 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3278 /* max scsi xri shrunk below the allocated scsi buffers */
3279 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3280 phba->sli4_hba.scsi_xri_max;
3281 /* release the extra allocated scsi buffers */
3282 for (i = 0; i < scsi_xri_cnt; i++) {
3283 list_remove_head(&scsi_sgl_list, psb,
3284 struct lpfc_scsi_buf, list);
3286 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
3287 psb->data, psb->dma_handle);
3291 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3292 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3293 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3296 /* update xris associated to remaining allocated scsi buffers */
3299 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
3300 lxri = lpfc_sli4_next_xritag(phba);
3301 if (lxri == NO_XRI) {
3302 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3303 "2560 Failed to allocate xri for "
3308 psb->cur_iocbq.sli4_lxritag = lxri;
3309 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3311 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3312 spin_lock(&phba->scsi_buf_list_put_lock);
3313 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3314 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3315 spin_unlock(&phba->scsi_buf_list_put_lock);
3316 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
3321 lpfc_free_els_sgl_list(phba);
3322 lpfc_scsi_free(phba);
3327 * lpfc_create_port - Create an FC port
3328 * @phba: pointer to lpfc hba data structure.
3329 * @instance: a unique integer ID to this FC port.
3330 * @dev: pointer to the device data structure.
3332 * This routine creates an FC port for the upper layer protocol. The FC port
3333 * can be created on top of either a physical port or a virtual port provided
3334 * by the HBA. This routine also allocates a SCSI host data structure (shost)
3335 * and associates the FC port created before adding the shost into the SCSI
3339 * @vport - pointer to the virtual N_Port data structure.
3340 * NULL - port create failed.
3343 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
3345 struct lpfc_vport *vport;
3346 struct Scsi_Host *shost;
3349 if (dev != &phba->pcidev->dev) {
3350 shost = scsi_host_alloc(&lpfc_vport_template,
3351 sizeof(struct lpfc_vport));
3353 if (phba->sli_rev == LPFC_SLI_REV4)
3354 shost = scsi_host_alloc(&lpfc_template,
3355 sizeof(struct lpfc_vport));
3357 shost = scsi_host_alloc(&lpfc_template_s3,
3358 sizeof(struct lpfc_vport));
3363 vport = (struct lpfc_vport *) shost->hostdata;
3365 vport->load_flag |= FC_LOADING;
3366 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3367 vport->fc_rscn_flush = 0;
3369 lpfc_get_vport_cfgparam(vport);
3370 shost->unique_id = instance;
3371 shost->max_id = LPFC_MAX_TARGET;
3372 shost->max_lun = vport->cfg_max_luns;
3373 shost->this_id = -1;
3374 shost->max_cmd_len = 16;
3375 shost->nr_hw_queues = phba->cfg_fcp_io_channel;
3376 if (phba->sli_rev == LPFC_SLI_REV4) {
3377 shost->dma_boundary =
3378 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3379 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3383 * Set initial can_queue value since 0 is no longer supported and
3384 * scsi_add_host will fail. This will be adjusted later based on the
3385 * max xri value determined in hba setup.
3387 shost->can_queue = phba->cfg_hba_queue_depth - 10;
3388 if (dev != &phba->pcidev->dev) {
3389 shost->transportt = lpfc_vport_transport_template;
3390 vport->port_type = LPFC_NPIV_PORT;
3392 shost->transportt = lpfc_transport_template;
3393 vport->port_type = LPFC_PHYSICAL_PORT;
3396 /* Initialize all internally managed lists. */
3397 INIT_LIST_HEAD(&vport->fc_nodes);
3398 INIT_LIST_HEAD(&vport->rcv_buffer_list);
3399 spin_lock_init(&vport->work_port_lock);
3401 init_timer(&vport->fc_disctmo);
3402 vport->fc_disctmo.function = lpfc_disc_timeout;
3403 vport->fc_disctmo.data = (unsigned long)vport;
3405 init_timer(&vport->els_tmofunc);
3406 vport->els_tmofunc.function = lpfc_els_timeout;
3407 vport->els_tmofunc.data = (unsigned long)vport;
3409 init_timer(&vport->delayed_disc_tmo);
3410 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
3411 vport->delayed_disc_tmo.data = (unsigned long)vport;
3413 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
3417 spin_lock_irq(&phba->hbalock);
3418 list_add_tail(&vport->listentry, &phba->port_list);
3419 spin_unlock_irq(&phba->hbalock);
3423 scsi_host_put(shost);
3429 * destroy_port - destroy an FC port
3430 * @vport: pointer to an lpfc virtual N_Port data structure.
3432 * This routine destroys an FC port from the upper layer protocol. All the
3433 * resources associated with the port are released.
3436 destroy_port(struct lpfc_vport *vport)
3438 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3439 struct lpfc_hba *phba = vport->phba;
3441 lpfc_debugfs_terminate(vport);
3442 fc_remove_host(shost);
3443 scsi_remove_host(shost);
3445 spin_lock_irq(&phba->hbalock);
3446 list_del_init(&vport->listentry);
3447 spin_unlock_irq(&phba->hbalock);
3449 lpfc_cleanup(vport);
3454 * lpfc_get_instance - Get a unique integer ID
3456 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
3457 * uses the kernel idr facility to perform the task.
3460 * instance - a unique integer ID allocated as the new instance.
3461 * -1 - lpfc get instance failed.
3464 lpfc_get_instance(void)
3468 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3469 return ret < 0 ? -1 : ret;
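/*
 * idr_alloc() returns the newly allocated ID (>= 0) on success or a
 * negative errno (e.g. -ENOMEM) on failure, hence the mapping of any
 * negative return to this driver's -1 failure convention.
 */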
3473 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
3474 * @shost: pointer to SCSI host data structure.
3475 * @time: elapsed time of the scan in jiffies.
3477 * This routine is called by the SCSI layer with a SCSI host to determine
3478 * whether the host scan is finished.
3480 * Note: there is no scan_start function as adapter initialization will have
3481 * asynchronously kicked off the link initialization.
3484 * 0 - SCSI host scan is not over yet.
3485 * 1 - SCSI host scan is over.
3487 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3489 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3490 struct lpfc_hba *phba = vport->phba;
3493 spin_lock_irq(shost->host_lock);
3495 if (vport->load_flag & FC_UNLOADING) {
3499 if (time >= msecs_to_jiffies(30 * 1000)) {
3500 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3501 "0461 Scanning longer than 30 "
3502 "seconds. Continuing initialization\n");
3506 if (time >= msecs_to_jiffies(15 * 1000) &&
3507 phba->link_state <= LPFC_LINK_DOWN) {
3508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3509 "0465 Link down longer than 15 "
3510 "seconds. Continuing initialization\n");
3515 if (vport->port_state != LPFC_VPORT_READY)
3517 if (vport->num_disc_nodes || vport->fc_prli_sent)
3519 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3521 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3527 spin_unlock_irq(shost->host_lock);
3532 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
3533 * @shost: pointer to SCSI host data structure.
3535 * This routine initializes a given SCSI host's attributes on an FC port. The
3536 * SCSI host can be either on top of a physical port or a virtual port.
3538 void lpfc_host_attrib_init(struct Scsi_Host *shost)
3540 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3541 struct lpfc_hba *phba = vport->phba;
3543 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
3546 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
3547 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3548 fc_host_supported_classes(shost) = FC_COS_CLASS3;
3550 memset(fc_host_supported_fc4s(shost), 0,
3551 sizeof(fc_host_supported_fc4s(shost)));
3552 fc_host_supported_fc4s(shost)[2] = 1;
3553 fc_host_supported_fc4s(shost)[7] = 1;
3555 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
3556 sizeof fc_host_symbolic_name(shost));
3558 fc_host_supported_speeds(shost) = 0;
3559 if (phba->lmt & LMT_32Gb)
3560 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
3561 if (phba->lmt & LMT_16Gb)
3562 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
3563 if (phba->lmt & LMT_10Gb)
3564 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
3565 if (phba->lmt & LMT_8Gb)
3566 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
3567 if (phba->lmt & LMT_4Gb)
3568 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
3569 if (phba->lmt & LMT_2Gb)
3570 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
3571 if (phba->lmt & LMT_1Gb)
3572 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
3574 fc_host_maxframe_size(shost) =
3575 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
3576 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
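/*
 * Worked example: bbRcvSizeMsb/bbRcvSizeLsb form a 12-bit buffer-to-
 * buffer receive data field size, so msb 0x08 and lsb 0x00 yield
 * ((0x08 & 0x0F) << 8) | 0x00 = 2048 bytes for the host's maxframe size.
 */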
3578 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
3580 /* This value is also unchanging */
3581 memset(fc_host_active_fc4s(shost), 0,
3582 sizeof(fc_host_active_fc4s(shost)));
3583 fc_host_active_fc4s(shost)[2] = 1;
3584 fc_host_active_fc4s(shost)[7] = 1;
3586 fc_host_max_npiv_vports(shost) = phba->max_vpi;
3587 spin_lock_irq(shost->host_lock);
3588 vport->load_flag &= ~FC_LOADING;
3589 spin_unlock_irq(shost->host_lock);
3593 * lpfc_stop_port_s3 - Stop SLI3 device port
3594 * @phba: pointer to lpfc hba data structure.
3596 * This routine is invoked to stop an SLI3 device port. It stops the device
3597 * from generating interrupts and stops the device driver's timers for the
3598 * device.
3601 lpfc_stop_port_s3(struct lpfc_hba *phba)
3603 /* Clear all interrupt enable conditions */
3604 writel(0, phba->HCregaddr);
3605 readl(phba->HCregaddr); /* flush */
3606 /* Clear all pending interrupts */
3607 writel(0xffffffff, phba->HAregaddr);
3608 readl(phba->HAregaddr); /* flush */
3610 /* Reset some HBA SLI setup states */
3611 lpfc_stop_hba_timers(phba);
3612 phba->pport->work_port_events = 0;
3616 * lpfc_stop_port_s4 - Stop SLI4 device port
3617 * @phba: pointer to lpfc hba data structure.
3619 * This routine is invoked to stop an SLI4 device port. It stops the device
3620 * from generating interrupts and stops the device driver's timers for the
3621 * device.
3624 lpfc_stop_port_s4(struct lpfc_hba *phba)
3626 /* Reset some HBA SLI4 setup states */
3627 lpfc_stop_hba_timers(phba);
3628 phba->pport->work_port_events = 0;
3629 phba->sli4_hba.intr_enable = 0;
3633 * lpfc_stop_port - Wrapper function for stopping hba port
3634 * @phba: Pointer to HBA context object.
3636 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
3637 * the API jump table function pointer from the lpfc_hba struct.
3640 lpfc_stop_port(struct lpfc_hba *phba)
3642 phba->lpfc_stop_port(phba);
3646 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
3647 * @phba: Pointer to hba for which this call is being executed.
3649 * This routine starts the timer waiting for the FCF rediscovery to complete.
3652 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3654 unsigned long fcf_redisc_wait_tmo =
3655 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
3656 /* Start fcf rediscovery wait period timer */
3657 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
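/*
 * mod_timer() (re)arms the one-shot rediscovery timer: if
 * LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds elapse without the timer
 * being stopped, lpfc_sli4_fcf_redisc_wait_tmo() below fires and queues
 * an FCF rediscovery event to the worker thread.
 */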
3658 spin_lock_irq(&phba->hbalock);
3659 /* Allow action to new fcf asynchronous event */
3660 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
3661 /* Mark the FCF rediscovery pending state */
3662 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
3663 spin_unlock_irq(&phba->hbalock);
3667 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3668 * @ptr: holds the lpfc_hba data structure pointer (timer argument).
3670 * This routine is invoked when the wait for FCF table rediscovery times
3671 * out. If new FCF record(s) have been discovered during the
3672 * wait period, a new FCF event shall be added to the FCOE async event
3673 * list, and the worker thread shall be woken up for processing from the
3674 * worker thread context.
3677 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3679 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3681 /* Don't send FCF rediscovery event if timer cancelled */
3682 spin_lock_irq(&phba->hbalock);
3683 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3684 spin_unlock_irq(&phba->hbalock);
3687 /* Clear FCF rediscovery timer pending flag */
3688 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3689 /* FCF rediscovery event to worker thread */
3690 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3691 spin_unlock_irq(&phba->hbalock);
3692 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3693 "2776 FCF rediscover quiescent timer expired\n");
3694 /* wake up worker thread */
3695 lpfc_worker_wake_up(phba);
3699 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3700 * @phba: pointer to lpfc hba data structure.
3701 * @acqe_link: pointer to the async link completion queue entry.
3703 * This routine is to parse the SLI4 link-attention link fault code and
3704 * translate it into the base driver's read link attention mailbox command
3705 * status.
3707 * Return: Link-attention status in terms of base driver's coding.
3710 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3711 struct lpfc_acqe_link *acqe_link)
3713 uint16_t latt_fault;
3715 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3716 case LPFC_ASYNC_LINK_FAULT_NONE:
3717 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3718 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3723 "0398 Invalid link fault code: x%x\n",
3724 bf_get(lpfc_acqe_link_fault, acqe_link));
3725 latt_fault = MBXERR_ERROR;
3732 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3733 * @phba: pointer to lpfc hba data structure.
3734 * @acqe_link: pointer to the async link completion queue entry.
3736 * This routine is to parse the SLI4 link attention type and translate it
3737 * into the base driver's link attention type coding.
3739 * Return: Link attention type in terms of base driver's coding.
3742 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3743 struct lpfc_acqe_link *acqe_link)
3747 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3748 case LPFC_ASYNC_LINK_STATUS_DOWN:
3749 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3750 att_type = LPFC_ATT_LINK_DOWN;
3752 case LPFC_ASYNC_LINK_STATUS_UP:
3753 /* Ignore physical link up events - wait for logical link up */
3754 att_type = LPFC_ATT_RESERVED;
3756 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3757 att_type = LPFC_ATT_LINK_UP;
3760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3761 "0399 Invalid link attention type: x%x\n",
3762 bf_get(lpfc_acqe_link_status, acqe_link));
3763 att_type = LPFC_ATT_RESERVED;
3770 * lpfc_sli_port_speed_get - Convert the SLI3 link speed code to link speed
3771 * @phba: pointer to lpfc hba data structure.
3773 * This routine is to get an SLI3 FC port's link speed in Mbps.
3775 * Return: link speed in terms of Mbps.
3778 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
3780 uint32_t link_speed;
3782 if (!lpfc_is_link_up(phba))
3785 if (phba->sli_rev <= LPFC_SLI_REV3) {
3786 switch (phba->fc_linkspeed) {
3787 case LPFC_LINK_SPEED_1GHZ:
3790 case LPFC_LINK_SPEED_2GHZ:
3793 case LPFC_LINK_SPEED_4GHZ:
3796 case LPFC_LINK_SPEED_8GHZ:
3799 case LPFC_LINK_SPEED_10GHZ:
3802 case LPFC_LINK_SPEED_16GHZ:
3809 if (phba->sli4_hba.link_state.logical_speed)
3811 phba->sli4_hba.link_state.logical_speed;
3813 link_speed = phba->sli4_hba.link_state.speed;
3819 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
3820 * @phba: pointer to lpfc hba data structure.
3821 * @evt_code: asynchronous event code.
3822 * @speed_code: asynchronous event link speed code.
3824 * This routine is to parse the given SLI4 async event link speed code into
3825 * value of Mbps for the link speed.
3827 * Return: link speed in terms of Mbps.
3830 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
3833 uint32_t port_speed;
3836 case LPFC_TRAILER_CODE_LINK:
3837 switch (speed_code) {
3838 case LPFC_ASYNC_LINK_SPEED_ZERO:
3841 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3844 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3847 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3850 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3853 case LPFC_ASYNC_LINK_SPEED_20GBPS:
3856 case LPFC_ASYNC_LINK_SPEED_25GBPS:
3859 case LPFC_ASYNC_LINK_SPEED_40GBPS:
3866 case LPFC_TRAILER_CODE_FC:
3867 switch (speed_code) {
3868 case LPFC_FC_LA_SPEED_UNKNOWN:
3871 case LPFC_FC_LA_SPEED_1G:
3874 case LPFC_FC_LA_SPEED_2G:
3877 case LPFC_FC_LA_SPEED_4G:
3880 case LPFC_FC_LA_SPEED_8G:
3883 case LPFC_FC_LA_SPEED_10G:
3886 case LPFC_FC_LA_SPEED_16G:
3889 case LPFC_FC_LA_SPEED_32G:
3903 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3904 * @phba: pointer to lpfc hba data structure.
3905 * @acqe_link: pointer to the async link completion queue entry.
3907 * This routine is to handle the SLI4 asynchronous FCoE link event.
3910 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3911 struct lpfc_acqe_link *acqe_link)
3913 struct lpfc_dmabuf *mp;
3916 struct lpfc_mbx_read_top *la;
3920 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3921 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3923 phba->fcoe_eventtag = acqe_link->event_tag;
3924 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3926 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3927 "0395 The mboxq allocation failed\n");
3930 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3932 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3933 "0396 The lpfc_dmabuf allocation failed\n");
3936 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3938 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3939 "0397 The mbuf allocation failed\n");
3940 goto out_free_dmabuf;
3943 /* Cleanup any outstanding ELS commands */
3944 lpfc_els_flush_all_cmd(phba);
3946 /* Block ELS IOCBs until we are done processing the link event */
3947 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3949 /* Update link event statistics */
3950 phba->sli.slistat.link_event++;
3952 /* Create lpfc_handle_latt mailbox command from link ACQE */
3953 lpfc_read_topology(phba, pmb, mp);
3954 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3955 pmb->vport = phba->pport;
3957 /* Keep the link status for extra SLI4 state machine reference */
3958 phba->sli4_hba.link_state.speed =
3959 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
3960 bf_get(lpfc_acqe_link_speed, acqe_link));
3961 phba->sli4_hba.link_state.duplex =
3962 bf_get(lpfc_acqe_link_duplex, acqe_link);
3963 phba->sli4_hba.link_state.status =
3964 bf_get(lpfc_acqe_link_status, acqe_link);
3965 phba->sli4_hba.link_state.type =
3966 bf_get(lpfc_acqe_link_type, acqe_link);
3967 phba->sli4_hba.link_state.number =
3968 bf_get(lpfc_acqe_link_number, acqe_link);
3969 phba->sli4_hba.link_state.fault =
3970 bf_get(lpfc_acqe_link_fault, acqe_link);
3971 phba->sli4_hba.link_state.logical_speed =
3972 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
3974 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3975 "2900 Async FC/FCoE Link event - Speed:%dGBit "
3976 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
3977 "Logical speed:%dMbps Fault:%d\n",
3978 phba->sli4_hba.link_state.speed,
3979 phba->sli4_hba.link_state.topology,
3980 phba->sli4_hba.link_state.status,
3981 phba->sli4_hba.link_state.type,
3982 phba->sli4_hba.link_state.number,
3983 phba->sli4_hba.link_state.logical_speed,
3984 phba->sli4_hba.link_state.fault);
3986 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3987 * topology info. Note: Optional for non FC-AL ports.
3989 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3990 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3991 if (rc == MBX_NOT_FINISHED)
3992 goto out_free_dmabuf;
3996 * For FCoE Mode: fill in all the topology information we need and call
3997 * the READ_TOPOLOGY completion routine to continue without actually
3998 * sending the READ_TOPOLOGY mailbox command to the port.
4000 /* Parse and translate status field */
4002 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
4004 /* Parse and translate link attention fields */
4005 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
4006 la->eventTag = acqe_link->event_tag;
4007 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
4008 bf_set(lpfc_mbx_read_top_link_spd, la,
4009 (bf_get(lpfc_acqe_link_speed, acqe_link)));
4011 /* Fake the following irrelevant fields */
4012 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
4013 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
4014 bf_set(lpfc_mbx_read_top_il, la, 0);
4015 bf_set(lpfc_mbx_read_top_pb, la, 0);
4016 bf_set(lpfc_mbx_read_top_fa, la, 0);
4017 bf_set(lpfc_mbx_read_top_mm, la, 0);
4019 /* Invoke the lpfc_handle_latt mailbox command callback function */
4020 lpfc_mbx_cmpl_read_topology(phba, pmb);
4027 mempool_free(pmb, phba->mbox_mem_pool);
4031 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
4032 * @phba: pointer to lpfc hba data structure.
4033 * @acqe_fc: pointer to the async fc completion queue entry.
4035 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
4036 * that the event was received and then issue a read_topology mailbox command so
4037 * that the rest of the driver will treat it the same as SLI3.
4040 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4042 struct lpfc_dmabuf *mp;
4045 struct lpfc_mbx_read_top *la;
4048 if (bf_get(lpfc_trailer_type, acqe_fc) !=
4049 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
4050 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4051 "2895 Non FC link Event detected.(%d)\n",
4052 bf_get(lpfc_trailer_type, acqe_fc));
4055 /* Keep the link status for extra SLI4 state machine reference */
4056 phba->sli4_hba.link_state.speed =
4057 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
4058 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
4059 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
4060 phba->sli4_hba.link_state.topology =
4061 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
4062 phba->sli4_hba.link_state.status =
4063 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
4064 phba->sli4_hba.link_state.type =
4065 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
4066 phba->sli4_hba.link_state.number =
4067 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
4068 phba->sli4_hba.link_state.fault =
4069 bf_get(lpfc_acqe_link_fault, acqe_fc);
4070 phba->sli4_hba.link_state.logical_speed =
4071 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
4072 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4073 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
4074 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
4075 "%dMbps Fault:%d\n",
4076 phba->sli4_hba.link_state.speed,
4077 phba->sli4_hba.link_state.topology,
4078 phba->sli4_hba.link_state.status,
4079 phba->sli4_hba.link_state.type,
4080 phba->sli4_hba.link_state.number,
4081 phba->sli4_hba.link_state.logical_speed,
4082 phba->sli4_hba.link_state.fault);
4083 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4085 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4086 "2897 The mboxq allocation failed\n");
4089 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4091 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4092 "2898 The lpfc_dmabuf allocation failed\n");
4095 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
4097 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4098 "2899 The mbuf allocation failed\n");
4099 goto out_free_dmabuf;
4102 /* Cleanup any outstanding ELS commands */
4103 lpfc_els_flush_all_cmd(phba);
4105 /* Block ELS IOCBs until we have done process link event */
4106 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
4108 /* Update link event statistics */
4109 phba->sli.slistat.link_event++;
4111 /* Create lpfc_handle_latt mailbox command from link ACQE */
4112 lpfc_read_topology(phba, pmb, mp);
4113 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
4114 pmb->vport = phba->pport;
4116 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
4117 /* Parse and translate status field */
4119 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
4122 /* Parse and translate link attention fields */
4123 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
4124 la->eventTag = acqe_fc->event_tag;
4125 bf_set(lpfc_mbx_read_top_att_type, la,
4126 LPFC_FC_LA_TYPE_LINK_DOWN);
4128 /* Invoke the mailbox command callback function */
4129 lpfc_mbx_cmpl_read_topology(phba, pmb);
4134 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4135 if (rc == MBX_NOT_FINISHED)
4136 goto out_free_dmabuf;
4142 mempool_free(pmb, phba->mbox_mem_pool);
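/*
 * A minimal sketch of the non-blocking mailbox idiom shared by the two
 * handlers above (illustration only; error handling is elided and the
 * completion routine name is hypothetical):
 *
 *	LPFC_MBOXQ_t *mbox;
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return;
 *	lpfc_read_topology(phba, mbox, mp);
 *	mbox->mbox_cmpl = my_read_top_cmpl;
 *	mbox->vport = phba->pport;
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 *
 * MBX_NOWAIT queues the command and returns immediately; the completion
 * routine later runs from mailbox context, so the caller owns the mboxq
 * only on the MBX_NOT_FINISHED failure path.
 */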
4146 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
4147 * @phba: pointer to lpfc hba data structure.
4148 * @acqe_sli: pointer to the async SLI completion queue entry.
4150 * This routine is to handle the SLI4 asynchronous SLI events.
4153 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
4159 uint8_t operational = 0;
4160 struct temp_event temp_event_data;
4161 struct lpfc_acqe_misconfigured_event *misconfigured;
4162 struct Scsi_Host *shost;
4164 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
4166 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4167 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
4168 "x%08x SLI Event Type:%d\n",
4169 acqe_sli->event_data1, acqe_sli->event_data2,
4172 port_name = phba->Port[0];
4173 if (port_name == 0x00)
4174 port_name = '?'; /* port name is empty */
4177 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
4178 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4179 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
4180 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4182 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4183 "3190 Over Temperature:%d Celsius- Port Name %c\n",
4184 acqe_sli->event_data1, port_name);
4186 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
4187 shost = lpfc_shost_from_vport(phba->pport);
4188 fc_host_post_vendor_event(shost, fc_get_event_number(),
4189 sizeof(temp_event_data),
4190 (char *)&temp_event_data,
4191 SCSI_NL_VID_TYPE_PCI
4192 | PCI_VENDOR_ID_EMULEX);
4194 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
4195 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
4196 temp_event_data.event_code = LPFC_NORMAL_TEMP;
4197 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
4199 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4200 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
4201 acqe_sli->event_data1, port_name);
4203 shost = lpfc_shost_from_vport(phba->pport);
4204 fc_host_post_vendor_event(shost, fc_get_event_number(),
4205 sizeof(temp_event_data),
4206 (char *)&temp_event_data,
4207 SCSI_NL_VID_TYPE_PCI
4208 | PCI_VENDOR_ID_EMULEX);
4210 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
4211 misconfigured = (struct lpfc_acqe_misconfigured_event *)
4212 &acqe_sli->event_data1;
4214 /* fetch the status for this port */
4215 switch (phba->sli4_hba.lnk_info.lnk_no) {
4216 case LPFC_LINK_NUMBER_0:
4217 status = bf_get(lpfc_sli_misconfigured_port0_state,
4218 &misconfigured->theEvent);
4219 operational = bf_get(lpfc_sli_misconfigured_port0_op,
4220 &misconfigured->theEvent);
4222 case LPFC_LINK_NUMBER_1:
4223 status = bf_get(lpfc_sli_misconfigured_port1_state,
4224 &misconfigured->theEvent);
4225 operational = bf_get(lpfc_sli_misconfigured_port1_op,
4226 &misconfigured->theEvent);
4228 case LPFC_LINK_NUMBER_2:
4229 status = bf_get(lpfc_sli_misconfigured_port2_state,
4230 &misconfigured->theEvent);
4231 operational = bf_get(lpfc_sli_misconfigured_port2_op,
4232 &misconfigured->theEvent);
4234 case LPFC_LINK_NUMBER_3:
4235 status = bf_get(lpfc_sli_misconfigured_port3_state,
4236 &misconfigured->theEvent);
4237 operational = bf_get(lpfc_sli_misconfigured_port3_op,
4238 &misconfigured->theEvent);
4241 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4243 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
4244 "event: Invalid link %d",
4245 phba->sli4_hba.lnk_info.lnk_no);
4249 /* Skip if optic state unchanged */
4250 if (phba->sli4_hba.lnk_info.optic_state == status)
4254 case LPFC_SLI_EVENT_STATUS_VALID:
4255 sprintf(message, "Physical Link is functional");
4257 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
4258 sprintf(message, "Optics faulted/incorrectly "
4259 "installed/not installed - Reseat optics, "
4260 "if issue not resolved, replace.");
4262 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
4264 "Optics of two types installed - Remove one "
4265 "optic or install matching pair of optics.");
4267 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
4268 sprintf(message, "Incompatible optics - Replace with "
4269 "compatible optics for card to function.");
4271 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
4272 sprintf(message, "Unqualified optics - Replace with "
4273 "Avago optics for Warranty and Technical "
4274 "Support - Link is%s operational",
4275 (operational) ? "" : " not");
4277 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
4278 sprintf(message, "Uncertified optics - Replace with "
4279 "Avago-certified optics to enable link "
4280 "operation - Link is%s operational",
4281 (operational) ? "" : " not");
4284 /* firmware is reporting a status we don't know about */
4285 sprintf(message, "Unknown event status x%02x", status);
4288 phba->sli4_hba.lnk_info.optic_state = status;
4289 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4290 "3176 Port Name %c %s\n", port_name, message);
4292 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
4293 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4294 "3192 Remote DPort Test Initiated - "
4295 "Event Data1:x%08x Event Data2: x%08x\n",
4296 acqe_sli->event_data1, acqe_sli->event_data2);
4299 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4300 "3193 Async SLI event - Event Data1:x%08x Event Data2:"
4301 "x%08x SLI Event Type:%d\n",
4302 acqe_sli->event_data1, acqe_sli->event_data2,
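/*
 * Sketch of the vendor-event path used by the temperature cases above
 * (illustration only; degrees_c is an assumed variable): any
 * driver-private event can be pushed to userspace listeners on the FC
 * transport netlink channel the same way.
 *
 *	struct temp_event evt = {
 *		.event_type = FC_REG_TEMPERATURE_EVENT,
 *		.event_code = LPFC_NORMAL_TEMP,
 *		.data = degrees_c,
 *	};
 *	struct Scsi_Host *sh = lpfc_shost_from_vport(phba->pport);
 *
 *	fc_host_post_vendor_event(sh, fc_get_event_number(), sizeof(evt),
 *				  (char *)&evt,
 *				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 *
 * The vendor id lets userspace filter Emulex events off the shared
 * netlink socket.
 */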
4309 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
4310 * @vport: pointer to vport data structure.
4312 * This routine is to perform Clear Virtual Link (CVL) on a vport in
4313 * response to a CVL event.
4315 * Return the pointer to the ndlp with the vport if successful, otherwise
4316 * return NULL.
4318 static struct lpfc_nodelist *
4319 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
4321 struct lpfc_nodelist *ndlp;
4322 struct Scsi_Host *shost;
4323 struct lpfc_hba *phba;
4330 ndlp = lpfc_findnode_did(vport, Fabric_DID);
4332 /* Cannot find existing Fabric ndlp, so allocate a new one */
4333 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4336 lpfc_nlp_init(vport, ndlp, Fabric_DID);
4337 /* Set the node type */
4338 ndlp->nlp_type |= NLP_FABRIC;
4339 /* Put ndlp onto node list */
4340 lpfc_enqueue_node(vport, ndlp);
4341 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4342 /* re-setup ndlp without removing from node list */
4343 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4347 if ((phba->pport->port_state < LPFC_FLOGI) &&
4348 (phba->pport->port_state != LPFC_VPORT_FAILED))
4350 /* If virtual link is not yet instantiated ignore CVL */
4351 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
4352 && (vport->port_state != LPFC_VPORT_FAILED))
4354 shost = lpfc_shost_from_vport(vport);
4357 lpfc_linkdown_port(vport);
4358 lpfc_cleanup_pending_mbox(vport);
4359 spin_lock_irq(shost->host_lock);
4360 vport->fc_flag |= FC_VPORT_CVL_RCVD;
4361 spin_unlock_irq(shost->host_lock);
4367 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4368 * @phba: pointer to lpfc hba data structure.
4370 * This routine is to perform Clear Virtual Link (CVL) on all vports in
4371 * response to a FCF dead event.
4374 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4376 struct lpfc_vport **vports;
4379 vports = lpfc_create_vport_work_array(phba);
4381 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
4382 lpfc_sli4_perform_vport_cvl(vports[i]);
4383 lpfc_destroy_vport_work_array(phba, vports);
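/*
 * The create/destroy work-array pair above is the driver's standard idiom
 * for walking vports safely from worker context; a minimal sketch
 * (illustration only; do_vport_work() is a hypothetical helper):
 *
 *	struct lpfc_vport **vports;
 *	int i;
 *
 *	vports = lpfc_create_vport_work_array(phba);
 *	if (vports != NULL)
 *		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 *			do_vport_work(vports[i]);
 *	lpfc_destroy_vport_work_array(phba, vports);
 *
 * The create call takes a reference on each vport so entries cannot go
 * away while the array is walked; destroy drops those references.
 */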
4387 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4388 * @phba: pointer to lpfc hba data structure.
4389 * @acqe_fip: pointer to the async FIP completion queue entry.
4391 * This routine is to handle the SLI4 asynchronous fcoe event.
4394 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4395 struct lpfc_acqe_fip *acqe_fip)
4397 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
4399 struct lpfc_vport *vport;
4400 struct lpfc_nodelist *ndlp;
4401 struct Scsi_Host *shost;
4402 int active_vlink_present;
4403 struct lpfc_vport **vports;
4406 phba->fc_eventTag = acqe_fip->event_tag;
4407 phba->fcoe_eventtag = acqe_fip->event_tag;
4408 switch (event_type) {
4409 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
4410 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
4411 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
4412 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4414 "2546 New FCF event, evt_tag:x%x, "
4416 acqe_fip->event_tag,
4419 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
4421 "2788 FCF param modified event, "
4422 "evt_tag:x%x, index:x%x\n",
4423 acqe_fip->event_tag,
4425 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4427 * During period of FCF discovery, read the FCF
4428 * table record indexed by the event to update
4429 * FCF roundrobin failover eligible FCF bmask.
4431 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4433 "2779 Read FCF (x%x) for updating "
4434 "roundrobin FCF failover bmask\n",
4436 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
4439 /* If the FCF discovery is in progress, do nothing. */
4440 spin_lock_irq(&phba->hbalock);
4441 if (phba->hba_flag & FCF_TS_INPROG) {
4442 spin_unlock_irq(&phba->hbalock);
4445 /* If fast FCF failover rescan event is pending, do nothing */
4446 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
4447 spin_unlock_irq(&phba->hbalock);
4451 /* If the FCF has been in discovered state, do nothing. */
4452 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
4453 spin_unlock_irq(&phba->hbalock);
4456 spin_unlock_irq(&phba->hbalock);
4458 /* Otherwise, scan the entire FCF table and re-discover SAN */
4459 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4460 "2770 Start FCF table scan per async FCF "
4461 "event, evt_tag:x%x, index:x%x\n",
4462 acqe_fip->event_tag, acqe_fip->index);
4463 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
4464 LPFC_FCOE_FCF_GET_FIRST);
4466 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4467 "2547 Issue FCF scan read FCF mailbox "
4468 "command failed (x%x)\n", rc);
4471 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
4472 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4473 "2548 FCF Table full count 0x%x tag 0x%x\n",
4474 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
4475 acqe_fip->event_tag);
4478 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
4479 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4480 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4481 "2549 FCF (x%x) disconnected from network, "
4482 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
4484 * If we are in the middle of FCF failover process, clear
4485 * the corresponding FCF bit in the roundrobin bitmap.
4487 spin_lock_irq(&phba->hbalock);
4488 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
4489 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
4490 spin_unlock_irq(&phba->hbalock);
4491 /* Update FLOGI FCF failover eligible FCF bmask */
4492 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
4495 spin_unlock_irq(&phba->hbalock);
4497 /* If the event is not for currently used fcf do nothing */
4498 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
4502 * Otherwise, request the port to rediscover the entire FCF
4503 * table for a fast recovery in case the current FCF is no
4504 * longer valid, as we are not already in the middle of the
4505 * FCF failover process.
4507 spin_lock_irq(&phba->hbalock);
4508 /* Mark the fast failover process in progress */
4509 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4510 spin_unlock_irq(&phba->hbalock);
4512 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4513 "2771 Start FCF fast failover process due to "
4514 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4515 "\n", acqe_fip->event_tag, acqe_fip->index);
4516 rc = lpfc_sli4_redisc_fcf_table(phba);
4518 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4520 "2772 Issue FCF rediscover mabilbox "
4521 "command failed, fail through to FCF "
4523 spin_lock_irq(&phba->hbalock);
4524 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4525 spin_unlock_irq(&phba->hbalock);
4527 * Last resort will fail over by treating this
4528 * as a link down to FCF registration.
4530 lpfc_sli4_fcf_dead_failthrough(phba);
4532 /* Reset FCF roundrobin bmask for new discovery */
4533 lpfc_sli4_clear_fcf_rr_bmask(phba);
4535 * Handling fast FCF failover to a DEAD FCF event is
4536 * considered equivalent to receiving CVL on all vports.
4538 lpfc_sli4_perform_all_vport_cvl(phba);
4541 case LPFC_FIP_EVENT_TYPE_CVL:
4542 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
4543 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4544 "2718 Clear Virtual Link Received for VPI 0x%x"
4545 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
4547 vport = lpfc_find_vport_by_vpid(phba,
4549 ndlp = lpfc_sli4_perform_vport_cvl(vport);
4552 active_vlink_present = 0;
4554 vports = lpfc_create_vport_work_array(phba);
4556 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
4558 if ((!(vports[i]->fc_flag &
4559 FC_VPORT_CVL_RCVD)) &&
4560 (vports[i]->port_state > LPFC_FDISC)) {
4561 active_vlink_present = 1;
4565 lpfc_destroy_vport_work_array(phba, vports);
4569 * Don't re-instantiate if vport is marked for deletion.
4570 * If we are here first then vport_delete is going to wait
4571 * for discovery to complete.
4573 if (!(vport->load_flag & FC_UNLOADING) &&
4574 active_vlink_present) {
4576 * If there are other active VLinks present,
4577 * re-instantiate the Vlink using FDISC.
4579 mod_timer(&ndlp->nlp_delayfunc,
4580 jiffies + msecs_to_jiffies(1000));
4581 shost = lpfc_shost_from_vport(vport);
4582 spin_lock_irq(shost->host_lock);
4583 ndlp->nlp_flag |= NLP_DELAY_TMO;
4584 spin_unlock_irq(shost->host_lock);
4585 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
4586 vport->port_state = LPFC_FDISC;
4589 * Otherwise, request the port to rediscover
4590 * the entire FCF table for a fast recovery,
4591 * in case the current FCF is no longer valid,
4592 * if we are not already in the FCF failover
4593 * process.
4595 spin_lock_irq(&phba->hbalock);
4596 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4597 spin_unlock_irq(&phba->hbalock);
4600 /* Mark the fast failover process in progress */
4601 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
4602 spin_unlock_irq(&phba->hbalock);
4603 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
4605 "2773 Start FCF failover per CVL, "
4606 "evt_tag:x%x\n", acqe_fip->event_tag);
4607 rc = lpfc_sli4_redisc_fcf_table(phba);
4609 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4611 "2774 Issue FCF rediscover "
4612 "mabilbox command failed, "
4613 "through to CVL event\n");
4614 spin_lock_irq(&phba->hbalock);
4615 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
4616 spin_unlock_irq(&phba->hbalock);
4618 * Last resort will be re-try on the
4619 * current registered FCF entry.
4621 lpfc_retry_pport_discovery(phba);
4624 * Reset FCF roundrobin bmask for new discovery
4627 lpfc_sli4_clear_fcf_rr_bmask(phba);
4631 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4632 "0288 Unknown FCoE event type 0x%x event tag "
4633 "0x%x\n", event_type, acqe_fip->event_tag);
4639 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
4640 * @phba: pointer to lpfc hba data structure.
4641 * @acqe_dcbx: pointer to the async DCBX completion queue entry.
4643 * This routine is to handle the SLI4 asynchronous dcbx event.
4646 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
4647 struct lpfc_acqe_dcbx *acqe_dcbx)
4649 phba->fc_eventTag = acqe_dcbx->event_tag;
4650 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4651 "0290 The SLI4 DCBX asynchronous event is not "
4656 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
4657 * @phba: pointer to lpfc hba data structure.
4658 * @acqe_grp5: pointer to the async grp5 completion queue entry.
4660 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
4661 * is an asynchronous notification of a logical link speed change. The Port
4662 * reports the logical link speed in units of 10Mbps.
4665 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
4666 struct lpfc_acqe_grp5 *acqe_grp5)
4668 uint16_t prev_ll_spd;
4670 phba->fc_eventTag = acqe_grp5->event_tag;
4671 phba->fcoe_eventtag = acqe_grp5->event_tag;
4672 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
4673 phba->sli4_hba.link_state.logical_speed =
4674 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
4675 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4676 "2789 GRP5 Async Event: Updating logical link speed "
4677 "from %dMbps to %dMbps\n", prev_ll_spd,
4678 phba->sli4_hba.link_state.logical_speed);
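/*
 * Worked example of the conversion above: the port reports logical link
 * speed in units of 10 Mbps, so an ACQE value of 1000 becomes
 * 1000 * 10 = 10000 Mbps (10 Gbps) in link_state.logical_speed and in
 * the 2789 message.
 */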
4682 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
4683 * @phba: pointer to lpfc hba data structure.
4685 * This routine is invoked by the worker thread to process all the pending
4686 * SLI4 asynchronous events.
4688 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
4690 struct lpfc_cq_event *cq_event;
4692 /* First, declare the async event has been handled */
4693 spin_lock_irq(&phba->hbalock);
4694 phba->hba_flag &= ~ASYNC_EVENT;
4695 spin_unlock_irq(&phba->hbalock);
4696 /* Now, handle all the async events */
4697 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
4698 /* Get the first event from the head of the event queue */
4699 spin_lock_irq(&phba->hbalock);
4700 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
4701 cq_event, struct lpfc_cq_event, list);
4702 spin_unlock_irq(&phba->hbalock);
4703 /* Process the asynchronous event */
4704 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
4705 case LPFC_TRAILER_CODE_LINK:
4706 lpfc_sli4_async_link_evt(phba,
4707 &cq_event->cqe.acqe_link);
4709 case LPFC_TRAILER_CODE_FCOE:
4710 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
4712 case LPFC_TRAILER_CODE_DCBX:
4713 lpfc_sli4_async_dcbx_evt(phba,
4714 &cq_event->cqe.acqe_dcbx);
4716 case LPFC_TRAILER_CODE_GRP5:
4717 lpfc_sli4_async_grp5_evt(phba,
4718 &cq_event->cqe.acqe_grp5);
4720 case LPFC_TRAILER_CODE_FC:
4721 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
4723 case LPFC_TRAILER_CODE_SLI:
4724 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
4727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4728 "1804 Invalid asynchrous event code: "
4729 "x%x\n", bf_get(lpfc_trailer_code,
4730 &cq_event->cqe.mcqe_cmpl));
4733 /* Free the completion event processed to the free pool */
4734 lpfc_sli4_cq_event_release(phba, cq_event);
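/*
 * A minimal sketch of the drain pattern above (illustration only;
 * handle_event() is hypothetical and "queue" stands in for
 * sp_asynce_work_queue): the lock is held only around the list
 * manipulation, never across the event handler, so new events may be
 * queued while one is being processed.
 *
 *	while (!list_empty(&queue)) {
 *		spin_lock_irq(&phba->hbalock);
 *		list_remove_head(&queue, cq_event, struct lpfc_cq_event, list);
 *		spin_unlock_irq(&phba->hbalock);
 *		handle_event(cq_event);
 *		lpfc_sli4_cq_event_release(phba, cq_event);
 *	}
 */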
4739 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
4740 * @phba: pointer to lpfc hba data structure.
4742 * This routine is invoked by the worker thread to process FCF table
4743 * rediscovery pending completion event.
4745 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
4749 spin_lock_irq(&phba->hbalock);
4750 /* Clear FCF rediscovery timeout event */
4751 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
4752 /* Clear driver fast failover FCF record flag */
4753 phba->fcf.failover_rec.flag = 0;
4754 /* Set state for FCF fast failover */
4755 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
4756 spin_unlock_irq(&phba->hbalock);
4758 /* Scan FCF table from the first entry to re-discover SAN */
4759 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4760 "2777 Start post-quiescent FCF table scan\n");
4761 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4763 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4764 "2747 Issue FCF scan read FCF mailbox "
4765 "command failed 0x%x\n", rc);
4769 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
4770 * @phba: pointer to lpfc hba data structure.
4771 * @dev_grp: The HBA PCI-Device group number.
4773 * This routine is invoked to set up the per HBA PCI-Device group function
4774 * API jump table entries.
4776 * Return: 0 if success, otherwise -ENODEV
4779 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4783 /* Set up lpfc PCI-device group */
4784 phba->pci_dev_grp = dev_grp;
4786 /* The LPFC_PCI_DEV_OC uses SLI4 */
4787 if (dev_grp == LPFC_PCI_DEV_OC)
4788 phba->sli_rev = LPFC_SLI_REV4;
4790 /* Set up device INIT API function jump table */
4791 rc = lpfc_init_api_table_setup(phba, dev_grp);
4794 /* Set up SCSI API function jump table */
4795 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
4798 /* Set up SLI API function jump table */
4799 rc = lpfc_sli_api_table_setup(phba, dev_grp);
4802 /* Set up MBOX API function jump table */
4803 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
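/*
 * Once these jump tables are populated, callers dispatch through the
 * function pointers instead of branching on sli_rev at every call site;
 * a sketch (illustration only):
 *
 *	phba->lpfc_stop_port(phba);
 *	phba->lpfc_hba_down_post(phba);
 *
 * Each pointer resolves to the _s3 or _s4 variant chosen by dev_grp
 * above, keeping the SLI-3/SLI-4 split localized to the
 * *_api_table_setup() routines.
 */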
4811 * lpfc_log_intr_mode - Log the active interrupt mode
4812 * @phba: pointer to lpfc hba data structure.
4813 * @intr_mode: active interrupt mode adopted.
4815 * This routine is invoked to log the currently used active interrupt mode
4818 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
4820 switch (intr_mode) {
4822 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4823 "0470 Enable INTx interrupt mode.\n");
4826 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4827 "0481 Enabled MSI interrupt mode.\n");
4830 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4831 "0480 Enabled MSI-X interrupt mode.\n");
4834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4835 "0482 Illegal interrupt mode.\n");
4842 * lpfc_enable_pci_dev - Enable a generic PCI device.
4843 * @phba: pointer to lpfc hba data structure.
4845 * This routine is invoked to enable the PCI device that is common to all
4846 * PCI devices.
4850 * other values - error
4853 lpfc_enable_pci_dev(struct lpfc_hba *phba)
4855 struct pci_dev *pdev;
4857 /* Obtain PCI device reference */
4861 pdev = phba->pcidev;
4862 /* Enable PCI device */
4863 if (pci_enable_device_mem(pdev))
4865 /* Request PCI resource for the device */
4866 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
4867 goto out_disable_device;
4868 /* Set up device as PCI master and save state for EEH */
4869 pci_set_master(pdev);
4870 pci_try_set_mwi(pdev);
4871 pci_save_state(pdev);
4873 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
4874 if (pci_is_pcie(pdev))
4875 pdev->needs_freset = 1;
4880 pci_disable_device(pdev);
4882 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4883 "1401 Failed to enable pci device\n");
4888 * lpfc_disable_pci_dev - Disable a generic PCI device.
4889 * @phba: pointer to lpfc hba data structure.
4891 * This routine is invoked to disable the PCI device that is common to all
4892 * PCI devices.
4895 lpfc_disable_pci_dev(struct lpfc_hba *phba)
4897 struct pci_dev *pdev;
4899 /* Obtain PCI device reference */
4903 pdev = phba->pcidev;
4904 /* Release PCI resource and disable PCI device */
4905 pci_release_mem_regions(pdev);
4906 pci_disable_device(pdev);
4912 * lpfc_reset_hba - Reset a hba
4913 * @phba: pointer to lpfc hba data structure.
4915 * This routine is invoked to reset a hba device. It brings the HBA
4916 * offline, performs a board restart, and then brings the board back
4917 * online. The lpfc_offline calls lpfc_sli_hba_down, which cleans up
4918 * any outstanding mailbox commands.
4921 lpfc_reset_hba(struct lpfc_hba *phba)
4923 /* If resets are disabled then set error state and return. */
4924 if (!phba->cfg_enable_hba_reset) {
4925 phba->link_state = LPFC_HBA_ERROR;
4928 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
4929 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
4931 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
4933 lpfc_sli_brdrestart(phba);
4935 lpfc_unblock_mgmt_io(phba);
4939 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
4940 * @phba: pointer to lpfc hba data structure.
4942 * This function reads the PCI SR-IOV extended capability of the physical
4943 * function to determine the maximum number of SR-IOV virtual functions
4944 * (TotalVFs) the device supports. It returns that count, or zero when the
4945 * device has no SR-IOV capability. This is a query only; no virtual
4946 * functions are enabled here.
4949 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4951 struct pci_dev *pdev = phba->pcidev;
4955 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4959 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
4964 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
4965 * @phba: pointer to lpfc hba data structure.
4966 * @nr_vfn: number of virtual functions to be enabled.
4968 * This function enables the PCI SR-IOV virtual functions to a physical
4969 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
4970 * enable the number of virtual functions to the physical function. As
4971 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
4972 * API call is not considered an error condition for most devices.
4975 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
4977 struct pci_dev *pdev = phba->pcidev;
4978 uint16_t max_nr_vfn;
4981 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
4982 if (nr_vfn > max_nr_vfn) {
4983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4984 "3057 Requested vfs (%d) greater than "
4985 "supported vfs (%d)", nr_vfn, max_nr_vfn);
4989 rc = pci_enable_sriov(pdev, nr_vfn);
4991 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4992 "2806 Failed to enable sriov on this device "
4993 "with vfn number nr_vf:%d, rc:%d\n",
4996 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4997 "2807 Successful enable sriov on this device "
4998 "with vfn number nr_vf:%d\n", nr_vfn);
5003 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
5004 * @phba: pointer to lpfc hba data structure.
5006 * This routine is invoked to set up the driver internal resources specific to
5007 * support the SLI-3 HBA device it is attached to.
5011 * other values - error
5014 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
5016 struct lpfc_sli *psli;
5020 * Initialize timers used by driver
5023 /* Heartbeat timer */
5024 init_timer(&phba->hb_tmofunc);
5025 phba->hb_tmofunc.function = lpfc_hb_timeout;
5026 phba->hb_tmofunc.data = (unsigned long)phba;
5029 /* MBOX heartbeat timer */
5030 init_timer(&psli->mbox_tmo);
5031 psli->mbox_tmo.function = lpfc_mbox_timeout;
5032 psli->mbox_tmo.data = (unsigned long) phba;
5033 /* FCP polling mode timer */
5034 init_timer(&phba->fcp_poll_timer);
5035 phba->fcp_poll_timer.function = lpfc_poll_timeout;
5036 phba->fcp_poll_timer.data = (unsigned long) phba;
5037 /* Fabric block timer */
5038 init_timer(&phba->fabric_block_timer);
5039 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
5040 phba->fabric_block_timer.data = (unsigned long) phba;
5041 /* EA polling mode timer */
5042 init_timer(&phba->eratt_poll);
5043 phba->eratt_poll.function = lpfc_poll_eratt;
5044 phba->eratt_poll.data = (unsigned long) phba;
5046 /* Host attention work mask setup */
5047 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
5048 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
5050 /* Get all the module params for configuring this host */
5051 lpfc_get_cfgparam(phba);
5052 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
5053 phba->menlo_flag |= HBA_MENLO_SUPPORT;
5054 /* check for menlo minimum sg count */
5055 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
5056 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
5059 if (!phba->sli.ring)
5060 phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING *
5061 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
5062 if (!phba->sli.ring)
5066 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
5067 * used to create the sg_dma_buf_pool must be dynamically calculated.
5070 /* Initialize the host templates with the configured values. */
5071 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5072 lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
5074 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
5075 if (phba->cfg_enable_bg) {
5077 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5078 * the FCP rsp, and a BDE for each. Since we have no control
5079 * over how many protection data segments the SCSI Layer
5080 * will hand us (i.e., there could be one for every block
5081 * in the IO), we just allocate enough BDEs to accommodate
5082 * our max amount and we need to limit lpfc_sg_seg_cnt to
5083 * minimize the risk of running out.
5085 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5086 sizeof(struct fcp_rsp) +
5087 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
5089 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
5090 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
5092 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
5093 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
5096 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5097 * the FCP rsp, a BDE for each, and a BDE for up to
5098 * cfg_sg_seg_cnt data segments.
5100 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5101 sizeof(struct fcp_rsp) +
5102 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
5104 /* Total BDEs in BPL for scsi_sg_list */
5105 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
5108 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5109 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
5110 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5111 phba->cfg_total_seg_cnt);
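/*
 * Worked example of the non-BG sizing above (the struct sizes are
 * illustrative assumptions): with cfg_sg_seg_cnt = 64 and a 12-byte
 * struct ulp_bde64, the DMA buffer holds the FCP_CMND and FCP_RSP
 * payloads plus (64 + 2) * 12 = 792 bytes of BDEs, where the "+ 2"
 * covers the reserved command and response BDEs; cfg_total_seg_cnt is
 * then 64 + 2 = 66.
 */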
5113 phba->max_vpi = LPFC_MAX_VPI;
5114 /* This will be set to correct value after config_port mbox */
5115 phba->max_vports = 0;
5118 * Initialize the SLI Layer to run with lpfc HBAs.
5120 lpfc_sli_setup(phba);
5121 lpfc_sli_queue_setup(phba);
5123 /* Allocate device driver memory */
5124 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
5128 * Enable sr-iov virtual functions if supported and configured
5129 * through the module parameter.
5131 if (phba->cfg_sriov_nr_virtfn > 0) {
5132 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5133 phba->cfg_sriov_nr_virtfn);
5135 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5136 "2808 Requested number of SR-IOV "
5137 "virtual functions (%d) is not "
5139 phba->cfg_sriov_nr_virtfn);
5140 phba->cfg_sriov_nr_virtfn = 0;
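/*
 * Note on the timer setup pattern above: each init_timer()/function/data
 * triple is the open-coded equivalent of setup_timer(); the heartbeat
 * timer, for example, could be written as the sketch below. Neither form
 * arms the timer; it only fires after a later mod_timer() supplies an
 * expiry.
 *
 *	setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout,
 *		    (unsigned long)phba);
 */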
5148 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
5149 * @phba: pointer to lpfc hba data structure.
5151 * This routine is invoked to unset the driver internal resources set up
5152 * specifically for supporting the SLI-3 HBA device it is attached to.
5155 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
5157 /* Free device driver memory allocated */
5158 lpfc_mem_free_all(phba);
5164 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
5165 * @phba: pointer to lpfc hba data structure.
5167 * This routine is invoked to set up the driver internal resources specific to
5168 * support the SLI-4 HBA device it is attached to.
5172 * other values - error
5175 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5177 struct lpfc_vector_map_info *cpup;
5178 struct lpfc_sli *psli;
5179 LPFC_MBOXQ_t *mboxq;
5180 int rc, i, hbq_count, max_buf_size;
5181 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
5182 struct lpfc_mqe *mqe;
5184 int fof_vectors = 0;
5186 /* Get all the module params for configuring this host */
5187 lpfc_get_cfgparam(phba);
5189 /* Before proceed, wait for POST done and device ready */
5190 rc = lpfc_sli4_post_status_check(phba);
5195 * Initialize timers used by driver
5198 /* Heartbeat timer */
5199 init_timer(&phba->hb_tmofunc);
5200 phba->hb_tmofunc.function = lpfc_hb_timeout;
5201 phba->hb_tmofunc.data = (unsigned long)phba;
5202 init_timer(&phba->rrq_tmr);
5203 phba->rrq_tmr.function = lpfc_rrq_timeout;
5204 phba->rrq_tmr.data = (unsigned long)phba;
5207 /* MBOX heartbeat timer */
5208 init_timer(&psli->mbox_tmo);
5209 psli->mbox_tmo.function = lpfc_mbox_timeout;
5210 psli->mbox_tmo.data = (unsigned long) phba;
5211 /* Fabric block timer */
5212 init_timer(&phba->fabric_block_timer);
5213 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
5214 phba->fabric_block_timer.data = (unsigned long) phba;
5215 /* EA polling mode timer */
5216 init_timer(&phba->eratt_poll);
5217 phba->eratt_poll.function = lpfc_poll_eratt;
5218 phba->eratt_poll.data = (unsigned long) phba;
5219 /* FCF rediscover timer */
5220 init_timer(&phba->fcf.redisc_wait);
5221 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
5222 phba->fcf.redisc_wait.data = (unsigned long)phba;
5225 * Control structure for handling external multi-buffer mailbox
5226 * command pass-through.
5228 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
5229 sizeof(struct lpfc_mbox_ext_buf_ctx));
5230 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
5232 phba->max_vpi = LPFC_MAX_VPI;
5234 /* This will be set to correct value after the read_config mbox */
5235 phba->max_vports = 0;
5237 /* Program the default value of vlan_id and fc_map */
5238 phba->valid_vlan = 0;
5239 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5240 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5241 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5244 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
5245 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
5247 if (!phba->sli.ring)
5248 phba->sli.ring = kzalloc(
5249 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
5250 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
5251 if (!phba->sli.ring)
5255 * It doesn't matter what family our adapter is in, we are
5256 * limited to 2 Pages, 512 SGEs, for our SGL.
5257 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5259 max_buf_size = (2 * SLI4_PAGE_SIZE);
5260 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
5261 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
5264 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
5265 * used to create the sg_dma_buf_pool must be dynamically calculated.
5268 if (phba->cfg_enable_bg) {
5270 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
5271 * the FCP rsp, and an SGE for each. Since we have no control
5272 * over how many protection data segments the SCSI Layer
5273 * will hand us (i.e., there could be one for every block
5274 * in the IO), we just allocate enough SGEs to accommodate
5275 * our max amount and we need to limit lpfc_sg_seg_cnt to
5276 * minimize the risk of running out.
5278 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5279 sizeof(struct fcp_rsp) + max_buf_size;
5281 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5282 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5284 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
5285 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
5288 * The scsi_buf for a regular I/O will hold the FCP cmnd,
5289 * the FCP rsp, a SGE for each, and a SGE for up to
5290 * cfg_sg_seg_cnt data segments.
5292 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5293 sizeof(struct fcp_rsp) +
5294 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
5296 /* Total SGEs for scsi_sg_list */
5297 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
5299 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
5300 * to post 1 page for the SGL.
5304 /* Initialize the host templates with the updated values. */
5305 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5306 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5308 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5309 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
5311 phba->cfg_sg_dma_buf_size =
5312 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5314 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5315 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5316 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5317 phba->cfg_total_seg_cnt);
5319 /* Initialize buffer queue management fields */
5320 hbq_count = lpfc_sli_hbq_count();
5321 for (i = 0; i < hbq_count; ++i)
5322 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5323 INIT_LIST_HEAD(&phba->rb_pend_list);
5324 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
5325 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
5328 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
5330 /* Initialize the Abort scsi buffer list used by driver */
5331 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5332 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5333 /* This abort list used by worker thread */
5334 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
5337 * Initialize driver internal slow-path work queues
5340 /* Driver internal slow-path CQ Event pool */
5341 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5342 /* Response IOCB work queue list */
5343 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
5344 /* Asynchronous event CQ Event work queue list */
5345 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5346 /* Fast-path XRI aborted CQ Event work queue list */
5347 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5348 /* Slow-path XRI aborted CQ Event work queue list */
5349 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5350 /* Receive queue CQ Event work queue list */
5351 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5353 /* Initialize extent block lists. */
5354 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
5355 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
5356 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
5357 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
5359 /* initialize optic_state to 0xFF */
5360 phba->sli4_hba.lnk_info.optic_state = 0xff;
5362 /* Initialize the driver internal SLI layer lists. */
5363 lpfc_sli_setup(phba);
5364 lpfc_sli_queue_setup(phba);
5366 /* Allocate device driver memory */
5367 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
5371 /* IF Type 2 ports get initialized now. */
5372 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5373 LPFC_SLI_INTF_IF_TYPE_2) {
5374 rc = lpfc_pci_function_reset(phba);
5377 phba->temp_sensor_support = 1;
5380 /* Create the bootstrap mailbox command */
5381 rc = lpfc_create_bootstrap_mbox(phba);
5385 /* Set up the host's endian order with the device. */
5386 rc = lpfc_setup_endian_order(phba);
5388 goto out_free_bsmbx;
5390 /* Set up the hba's configuration parameters. */
5391 rc = lpfc_sli4_read_config(phba);
5393 goto out_free_bsmbx;
5394 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
5396 goto out_free_bsmbx;
5398 /* IF Type 0 ports get initialized now. */
5399 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5400 LPFC_SLI_INTF_IF_TYPE_0) {
5401 rc = lpfc_pci_function_reset(phba);
5403 goto out_free_bsmbx;
5406 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5410 goto out_free_bsmbx;
5413 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
5414 lpfc_supported_pages(mboxq);
5415 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5417 mqe = &mboxq->u.mqe;
5418 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
5419 LPFC_MAX_SUPPORTED_PAGES);
5420 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
5421 switch (pn_page[i]) {
5422 case LPFC_SLI4_PARAMETERS:
5423 phba->sli4_hba.pc_sli4_params.supported = 1;
5429 /* Read the port's SLI4 Parameters capabilities if supported. */
5430 if (phba->sli4_hba.pc_sli4_params.supported)
5431 rc = lpfc_pc_sli4_params_get(phba, mboxq);
5433 mempool_free(mboxq, phba->mbox_mem_pool);
5435 goto out_free_bsmbx;
5440 * Get sli4 parameters that override parameters from Port capabilities.
5441 * If this call fails, it isn't critical unless the SLI4 parameters come
5444 rc = lpfc_get_sli4_parameters(phba, mboxq);
5446 if (phba->sli4_hba.extents_in_use &&
5447 phba->sli4_hba.rpi_hdrs_in_use) {
5448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5449 "2999 Unsupported SLI4 Parameters "
5450 "Extents and RPI headers enabled.\n");
5451 goto out_free_bsmbx;
5454 mempool_free(mboxq, phba->mbox_mem_pool);
5456 /* Verify OAS is supported */
5457 lpfc_sli4_oas_verify(phba);
5461 /* Verify all the SLI4 queues */
5462 rc = lpfc_sli4_queue_verify(phba);
5464 goto out_free_bsmbx;
5466 /* Create driver internal CQE event pool */
5467 rc = lpfc_sli4_cq_event_pool_create(phba);
5469 goto out_free_bsmbx;
5471 /* Initialize sgl lists per host */
5472 lpfc_init_sgl_list(phba);
5474 /* Allocate and initialize active sgl array */
5475 rc = lpfc_init_active_sgl_array(phba);
5477 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5478 "1430 Failed to initialize sgl list.\n");
5479 goto out_destroy_cq_event_pool;
5481 rc = lpfc_sli4_init_rpi_hdrs(phba);
5483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5484 "1432 Failed to initialize rpi headers.\n");
5485 goto out_free_active_sgl;
5488 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
5489 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
5490 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5492 if (!phba->fcf.fcf_rr_bmask) {
5493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5494 "2759 Failed allocate memory for FCF round "
5495 "robin failover bmask\n");
5497 goto out_remove_rpi_hdrs;
5500 phba->sli4_hba.fcp_eq_hdl =
5501 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5502 (fof_vectors + phba->cfg_fcp_io_channel)),
5504 if (!phba->sli4_hba.fcp_eq_hdl) {
5505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5506 "2572 Failed allocate memory for "
5507 "fast-path per-EQ handle array\n");
5509 goto out_free_fcf_rr_bmask;
5512 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5514 phba->cfg_fcp_io_channel)), GFP_KERNEL);
5515 if (!phba->sli4_hba.msix_entries) {
5516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5517 "2573 Failed allocate memory for msi-x "
5518 "interrupt vector entries\n");
5520 goto out_free_fcp_eq_hdl;
5523 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5524 phba->sli4_hba.num_present_cpu),
5526 if (!phba->sli4_hba.cpu_map) {
5527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5528 "3327 Failed allocate memory for msi-x "
5529 "interrupt vector mapping\n");
5533 if (lpfc_used_cpu == NULL) {
5534 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
5536 if (!lpfc_used_cpu) {
5537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5538 "3335 Failed allocate memory for msi-x "
5539 "interrupt vector mapping\n");
5540 kfree(phba->sli4_hba.cpu_map);
5544 for (i = 0; i < lpfc_present_cpu; i++)
5545 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
5548 /* Initialize io channels for round robin */
5549 cpup = phba->sli4_hba.cpu_map;
5551 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
5552 cpup->channel_id = rc;
5554 if (rc >= phba->cfg_fcp_io_channel)
5559 * Enable sr-iov virtual functions if supported and configured
5560 * through the module parameter.
5562 if (phba->cfg_sriov_nr_virtfn > 0) {
5563 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5564 phba->cfg_sriov_nr_virtfn);
5566 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5567 "3020 Requested number of SR-IOV "
5568 "virtual functions (%d) is not "
5570 phba->cfg_sriov_nr_virtfn);
5571 phba->cfg_sriov_nr_virtfn = 0;
5578 kfree(phba->sli4_hba.msix_entries);
5579 out_free_fcp_eq_hdl:
5580 kfree(phba->sli4_hba.fcp_eq_hdl);
5581 out_free_fcf_rr_bmask:
5582 kfree(phba->fcf.fcf_rr_bmask);
5583 out_remove_rpi_hdrs:
5584 lpfc_sli4_remove_rpi_hdrs(phba);
5585 out_free_active_sgl:
5586 lpfc_free_active_sgl(phba);
5587 out_destroy_cq_event_pool:
5588 lpfc_sli4_cq_event_pool_destroy(phba);
5590 lpfc_destroy_bootstrap_mbox(phba);
5592 lpfc_mem_free(phba);
5597 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
5598 * @phba: pointer to lpfc hba data structure.
5600 * This routine is invoked to unset the driver internal resources set up
5601 * specifically for supporting the SLI-4 HBA device it is attached to.
5604 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5606 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5608 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5609 kfree(phba->sli4_hba.cpu_map);
5610 phba->sli4_hba.num_present_cpu = 0;
5611 phba->sli4_hba.num_online_cpu = 0;
5612 phba->sli4_hba.curr_disp_cpu = 0;
5614 /* Free memory allocated for msi-x interrupt vector entries */
5615 kfree(phba->sli4_hba.msix_entries);
5617 /* Free memory allocated for fast-path work queue handles */
5618 kfree(phba->sli4_hba.fcp_eq_hdl);
5620 /* Free the allocated rpi headers. */
5621 lpfc_sli4_remove_rpi_hdrs(phba);
5622 lpfc_sli4_remove_rpis(phba);
5624 /* Free eligible FCF index bmask */
5625 kfree(phba->fcf.fcf_rr_bmask);
5627 /* Free the ELS sgl list */
5628 lpfc_free_active_sgl(phba);
5629 lpfc_free_els_sgl_list(phba);
5631 /* Free the completion queue EQ event pool */
5632 lpfc_sli4_cq_event_release_all(phba);
5633 lpfc_sli4_cq_event_pool_destroy(phba);
5635 /* Release resource identifiers. */
5636 lpfc_sli4_dealloc_resource_identifiers(phba);
5638 /* Free the bsmbx region. */
5639 lpfc_destroy_bootstrap_mbox(phba);
5641 /* Free the SLI Layer memory with SLI4 HBAs */
5642 lpfc_mem_free_all(phba);
5644 /* Free the current connect table */
5645 list_for_each_entry_safe(conn_entry, next_conn_entry,
5646 &phba->fcf_conn_rec_list, list) {
5647 list_del_init(&conn_entry->list);
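/*
 * The _safe iterator above is required because each entry is unlinked and
 * freed inside the loop; a minimal sketch of the idiom with a
 * hypothetical entry type:
 *
 *	struct my_entry *cur, *next;
 *
 *	list_for_each_entry_safe(cur, next, &head, list) {
 *		list_del_init(&cur->list);
 *		kfree(cur);
 *	}
 *
 * A plain list_for_each_entry() would dereference the just-freed node to
 * find its successor.
 */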
5655 * lpfc_init_api_table_setup - Set up init api function jump table
5656 * @phba: The hba struct for which this call is being executed.
5657 * @dev_grp: The HBA PCI-Device group number.
5659 * This routine sets up the device INIT interface API function jump table
5662 * Returns: 0 - success, -ENODEV - failure.
5665 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5667 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5668 phba->lpfc_hba_down_link = lpfc_hba_down_link;
5669 phba->lpfc_selective_reset = lpfc_selective_reset;
5671 case LPFC_PCI_DEV_LP:
5672 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5673 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5674 phba->lpfc_stop_port = lpfc_stop_port_s3;
5676 case LPFC_PCI_DEV_OC:
5677 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5678 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5679 phba->lpfc_stop_port = lpfc_stop_port_s4;
5682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5683 "1431 Invalid HBA PCI-device group: 0x%x\n",
5692 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5693 * @phba: pointer to lpfc hba data structure.
5695 * This routine is invoked to set up the driver internal resources before the
5696 * device specific resource setup to support the HBA device it attached to.
5700 * other values - error
5703 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5706 * Driver resources common to all SLI revisions
5708 atomic_set(&phba->fast_event_count, 0);
5709 spin_lock_init(&phba->hbalock);
5711 /* Initialize ndlp management spinlock */
5712 spin_lock_init(&phba->ndlp_lock);
5714 INIT_LIST_HEAD(&phba->port_list);
5715 INIT_LIST_HEAD(&phba->work_list);
5716 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5718 /* Initialize the wait queue head for the kernel thread */
5719 init_waitqueue_head(&phba->work_waitq);
5721 /* Initialize the scsi buffer list used by driver for scsi IO */
5722 spin_lock_init(&phba->scsi_buf_list_get_lock);
5723 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5724 spin_lock_init(&phba->scsi_buf_list_put_lock);
5725 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5727 /* Initialize the fabric iocb list */
5728 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5730 /* Initialize list to save ELS buffers */
5731 INIT_LIST_HEAD(&phba->elsbuf);
5733 /* Initialize FCF connection rec list */
5734 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5736 /* Initialize OAS configuration list */
5737 spin_lock_init(&phba->devicelock);
5738 INIT_LIST_HEAD(&phba->luns);
5744 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5745 * @phba: pointer to lpfc hba data structure.
5747 * This routine is invoked to set up the driver internal resources after the
5748 * device specific resource setup to support the HBA device it attached to.
5752 * other values - error
5755 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5759 /* Startup the kernel thread for this host adapter. */
5760 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5761 "lpfc_worker_%d", phba->brd_no);
5762 if (IS_ERR(phba->worker_thread)) {
5763 error = PTR_ERR(phba->worker_thread);
5771 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5772 * @phba: pointer to lpfc hba data structure.
5774 * This routine is invoked to unset the driver internal resources set up after
5775 * the device specific resource setup for supporting the HBA device it
5779 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5781 /* Stop kernel worker thread */
5782 kthread_stop(phba->worker_thread);
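/*
 * Worker-thread lifecycle sketch (illustration only; work_pending() is a
 * hypothetical stand-in for the driver's real wakeup predicate):
 * kthread_run() creates and wakes the thread, and the kthread_stop()
 * above blocks until the thread function returns, so lpfc_do_work() must
 * poll kthread_should_stop() in its main loop:
 *
 *	while (!kthread_should_stop()) {
 *		wait_event_interruptible(phba->work_waitq,
 *					 work_pending(phba) ||
 *					 kthread_should_stop());
 *		handle queued work here
 *	}
 */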
5786 * lpfc_free_iocb_list - Free iocb list.
5787 * @phba: pointer to lpfc hba data structure.
5789 * This routine is invoked to free the driver's IOCB list and memory.
5792 lpfc_free_iocb_list(struct lpfc_hba *phba)
5794 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5796 spin_lock_irq(&phba->hbalock);
5797 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5798 &phba->lpfc_iocb_list, list) {
5799 list_del(&iocbq_entry->list);
5801 phba->total_iocbq_bufs--;
5803 spin_unlock_irq(&phba->hbalock);
5809 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5810 * @phba: pointer to lpfc hba data structure.
5812 * This routine is invoked to allocate and initialize the driver's IOCB
5813 * list and set up the IOCB tag array accordingly.
5817 * other values - error
5820 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5822 struct lpfc_iocbq *iocbq_entry = NULL;
5826 /* Initialize and populate the iocb list per host. */
5827 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5828 for (i = 0; i < iocb_count; i++) {
5829 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5830 if (iocbq_entry == NULL) {
5831 printk(KERN_ERR "%s: only allocated %d iocbs of "
5832 "expected %d count. Unloading driver.\n",
5833 __func__, i, iocb_count);
5834 goto out_free_iocbq;
5837 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5840 printk(KERN_ERR "%s: failed to allocate IOTAG. "
5841 "Unloading driver.\n", __func__);
5842 goto out_free_iocbq;
5844 iocbq_entry->sli4_lxritag = NO_XRI;
5845 iocbq_entry->sli4_xritag = NO_XRI;
5847 spin_lock_irq(&phba->hbalock);
5848 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5849 phba->total_iocbq_bufs++;
5850 spin_unlock_irq(&phba->hbalock);
5856 lpfc_free_iocb_list(phba);
5862 * lpfc_free_sgl_list - Free a given sgl list.
5863 * @phba: pointer to lpfc hba data structure.
5864 * @sglq_list: pointer to the head of sgl list.
5866 * This routine is invoked to free a given sgl list and its memory.
5869 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5871 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5873 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5874 list_del(&sglq_entry->list);
5875 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5881 * lpfc_free_els_sgl_list - Free els sgl list.
5882 * @phba: pointer to lpfc hba data structure.
5884 * This routine is invoked to free the driver's els sgl list and memory.
5887 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5889 LIST_HEAD(sglq_list);
5890 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5892 /* Retrieve all els sgls from driver list */
5893 spin_lock_irq(&phba->hbalock);
5894 spin_lock(&pring->ring_lock);
5895 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5896 spin_unlock(&pring->ring_lock);
5897 spin_unlock_irq(&phba->hbalock);
5899 /* Now free the sgl list */
5900 lpfc_free_sgl_list(phba, &sglq_list);
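/*
 * The splice-then-free pattern above is the standard way to empty a
 * lock-protected list without holding the lock across the per-entry
 * teardown; a minimal sketch (illustration only; free_entries() is
 * hypothetical):
 *
 *	LIST_HEAD(tmp);
 *
 *	spin_lock_irq(&lock);
 *	list_splice_init(&protected_list, &tmp);
 *	spin_unlock_irq(&lock);
 *	free_entries(&tmp);
 *
 * list_splice_init() detaches the whole list in O(1) and leaves the
 * shared head empty and valid, so the private "tmp" list can then be
 * walked with no locking at all.
 */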
5904 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5905 * @phba: pointer to lpfc hba data structure.
5907 * This routine is invoked to allocate the driver's active sgl memory.
5908 * This array will hold the sglq_entry's for active IOs.
5911 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5914 size = sizeof(struct lpfc_sglq *);
5915 size *= phba->sli4_hba.max_cfg_param.max_xri;
5917 phba->sli4_hba.lpfc_sglq_active_list =
5918 kzalloc(size, GFP_KERNEL);
5919 if (!phba->sli4_hba.lpfc_sglq_active_list)
5925 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5926 * @phba: pointer to lpfc hba data structure.
5928 * This routine is invoked to walk through the array of active sglq entries
5929 * and free all of the resources.
5930 * This is just a placeholder for now.
5933 lpfc_free_active_sgl(struct lpfc_hba *phba)
5935 kfree(phba->sli4_hba.lpfc_sglq_active_list);
5939 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5940 * @phba: pointer to lpfc hba data structure.
5942 * This routine is invoked to allocate and initialize the driver's sgl
5943 * list and set up the sgl xritag tag array accordingly.
5947 lpfc_init_sgl_list(struct lpfc_hba *phba)
5949 /* Initialize and populate the sglq list per host/VF. */
5950 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5951 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5953 /* els xri-sgl book keeping */
5954 phba->sli4_hba.els_xri_cnt = 0;
5956 /* scsi xri-buffer book keeping */
5957 phba->sli4_hba.scsi_xri_cnt = 0;
5961 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5962 * @phba: pointer to lpfc hba data structure.
5964 * This routine is invoked to post rpi header templates to the
5965 * port for those SLI4 ports that do not support extents. This routine
5966 * posts a PAGE_SIZE memory region to the port to hold up to
5967 * PAGE_SIZE / 64 rpi context headers. This is an initialization routine
5968 * and should be called only when interrupts are disabled.
5972 * -ERROR - otherwise.
5975 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5978 struct lpfc_rpi_hdr *rpi_hdr;
5980 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5981 if (!phba->sli4_hba.rpi_hdrs_in_use)
5983 if (phba->sli4_hba.extents_in_use)
5986 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5988 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5989 "0391 Error during rpi post operation\n");
5990 lpfc_sli4_remove_rpis(phba);
5998 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
5999 * @phba: pointer to lpfc hba data structure.
6001 * This routine is invoked to allocate a single 4KB memory region to
6002 * support rpis and stores them in the phba. This single region
6003 * provides support for up to 64 rpis. The region is used globally
6007 * A valid rpi hdr on success.
6008 * A NULL pointer on any failure.
6010 struct lpfc_rpi_hdr *
6011 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6013 uint16_t rpi_limit, curr_rpi_range;
6014 struct lpfc_dmabuf *dmabuf;
6015 struct lpfc_rpi_hdr *rpi_hdr;
6019 * If the SLI4 port supports extents, posting the rpi header isn't
6020 * required. Set the expected maximum count and let the actual value
6021 * get set when extents are fully allocated.
6023 if (!phba->sli4_hba.rpi_hdrs_in_use)
6025 if (phba->sli4_hba.extents_in_use)
6028 /* The limit on the logical index is just the max_rpi count. */
6029 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
6030 phba->sli4_hba.max_cfg_param.max_rpi - 1;
6032 spin_lock_irq(&phba->hbalock);
6034 * Establish the starting RPI in this header block. The starting
6035 * rpi is normalized to a zero base because the physical rpi is
6038 curr_rpi_range = phba->sli4_hba.next_rpi;
6039 spin_unlock_irq(&phba->hbalock);
6042 * The port has a limited number of rpis. The increment here
6043 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
6044 * and to allow the full max_rpi range per port.
6046 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
6047 rpi_count = rpi_limit - curr_rpi_range;
6049 rpi_count = LPFC_RPI_HDR_COUNT;
6054 * First allocate the protocol header region for the port. The
6055 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
6057 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6061 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
6062 LPFC_HDR_TEMPLATE_SIZE,
6063 &dmabuf->phys, GFP_KERNEL);
6064 if (!dmabuf->virt) {
6066 goto err_free_dmabuf;
6069 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
6071 goto err_free_coherent;
6074 /* Save the rpi header data for cleanup later. */
6075 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
6077 goto err_free_coherent;
6079 rpi_hdr->dmabuf = dmabuf;
6080 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
6081 rpi_hdr->page_count = 1;
6082 spin_lock_irq(&phba->hbalock);
6084 /* The rpi_hdr stores the logical index only. */
6085 rpi_hdr->start_rpi = curr_rpi_range;
6086 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
6089 * The next_rpi stores the next logical modulo-64 rpi value used
6090 * to post physical rpis in subsequent rpi postings.
6092 phba->sli4_hba.next_rpi += rpi_count;
6093 spin_unlock_irq(&phba->hbalock);
6097 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
6098 dmabuf->virt, dmabuf->phys);
6105 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
6106 * @phba: pointer to lpfc hba data structure.
6108 * This routine is invoked to remove all memory resources allocated
6109 * to support rpis for SLI4 ports not supporting extents. This routine
6110 * presumes the caller has released all rpis consumed by fabric or port
6111 * logins and is prepared to have the header pages removed.
6114 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
6116 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
6118 if (!phba->sli4_hba.rpi_hdrs_in_use)
6121 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
6122 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
6123 list_del(&rpi_hdr->list);
6124 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
6125 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
6126 kfree(rpi_hdr->dmabuf);
6130 /* There are no rpis available to the port now. */
6131 phba->sli4_hba.next_rpi = 0;
6135 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
6136 * @pdev: pointer to pci device data structure.
6138 * This routine is invoked to allocate the driver hba data structure for an
6139 * HBA device. If the allocation is successful, the phba reference to the
6140 * PCI device data structure is set.
6143 * pointer to @phba - successful
6146 static struct lpfc_hba *
6147 lpfc_hba_alloc(struct pci_dev *pdev)
6149 struct lpfc_hba *phba;
6151 /* Allocate memory for HBA structure */
6152 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
6154 dev_err(&pdev->dev, "failed to allocate hba struct\n");
6158 /* Set reference to PCI device in HBA structure */
6159 phba->pcidev = pdev;
6161 /* Assign an unused board number */
6162 phba->brd_no = lpfc_get_instance();
6163 if (phba->brd_no < 0) {
6167 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
6169 spin_lock_init(&phba->ct_ev_lock);
6170 INIT_LIST_HEAD(&phba->ct_ev_waiters);
6176 * lpfc_hba_free - Free driver hba data structure with a device.
6177 * @phba: pointer to lpfc hba data structure.
6179 * This routine is invoked to free the driver hba data structure with an
6180 * HBA device.
6183 lpfc_hba_free(struct lpfc_hba *phba)
6185 /* Release the driver assigned board number */
6186 idr_remove(&lpfc_hba_index, phba->brd_no);
6188 /* Free memory allocated with sli rings */
6189 kfree(phba->sli.ring);
6190 phba->sli.ring = NULL;
6197 * lpfc_create_shost - Create hba physical port with associated scsi host.
6198 * @phba: pointer to lpfc hba data structure.
6200 * This routine is invoked to create HBA physical port and associate a SCSI
6201 * host with it.
6205 * other values - error
6208 lpfc_create_shost(struct lpfc_hba *phba)
6210 struct lpfc_vport *vport;
6211 struct Scsi_Host *shost;
6213 /* Initialize HBA FC structure */
6214 phba->fc_edtov = FF_DEF_EDTOV;
6215 phba->fc_ratov = FF_DEF_RATOV;
6216 phba->fc_altov = FF_DEF_ALTOV;
6217 phba->fc_arbtov = FF_DEF_ARBTOV;
6219 atomic_set(&phba->sdev_cnt, 0);
6220 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
6224 shost = lpfc_shost_from_vport(vport);
6225 phba->pport = vport;
6226 lpfc_debugfs_initialize(vport);
6227 /* Put reference to SCSI host to driver's device private data */
6228 pci_set_drvdata(phba->pcidev, shost);
6231 * At this point we are fully registered with PSA. In addition,
6232 * any initial discovery should be completed.
6234 vport->load_flag |= FC_ALLOW_FDMI;
6235 if (phba->cfg_enable_SmartSAN ||
6236 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
6238 /* Setup appropriate attribute masks */
6239 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
6240 if (phba->cfg_enable_SmartSAN)
6241 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
6243 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
6249 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
6250 * @phba: pointer to lpfc hba data structure.
6252 * This routine is invoked to destroy HBA physical port and the associated
6253 * SCSI host.
6256 lpfc_destroy_shost(struct lpfc_hba *phba)
6258 struct lpfc_vport *vport = phba->pport;
6260 /* Destroy physical port that associated with the SCSI host */
6261 destroy_port(vport);
6267 * lpfc_setup_bg - Setup Block guard structures and debug areas.
6268 * @phba: pointer to lpfc hba data structure.
6269 * @shost: the shost to be used to detect Block guard settings.
6271 * This routine sets up the local Block guard protocol settings for @shost.
6272 * This routine also allocates memory for debugging bg buffers.
6275 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
6281 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
6282 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6283 "1478 Registering BlockGuard with the "
6286 old_mask = phba->cfg_prot_mask;
6287 old_guard = phba->cfg_prot_guard;
6289 /* Only allow supported values */
6290 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
6291 SHOST_DIX_TYPE0_PROTECTION |
6292 SHOST_DIX_TYPE1_PROTECTION);
6293 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
6294 SHOST_DIX_GUARD_CRC);
6296 /* DIF Type 1 protection for profiles AST1/C1 is end to end */
6297 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
6298 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
6300 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
6301 if ((old_mask != phba->cfg_prot_mask) ||
6302 (old_guard != phba->cfg_prot_guard))
6303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6304 "1475 Registering BlockGuard with the "
6305 "SCSI layer: mask %d guard %d\n",
6306 phba->cfg_prot_mask,
6307 phba->cfg_prot_guard);
6309 scsi_host_set_prot(shost, phba->cfg_prot_mask);
6310 scsi_host_set_guard(shost, phba->cfg_prot_guard);
6312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6313 "1479 Not Registering BlockGuard with the SCSI "
6314 "layer, Bad protection parameters: %d %d\n",
6315 old_mask, old_guard);
6318 if (!_dump_buf_data) {
6320 spin_lock_init(&_dump_buf_lock);
6322 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6323 if (_dump_buf_data) {
6324 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6325 "9043 BLKGRD: allocated %d pages for "
6326 "_dump_buf_data at 0x%p\n",
6327 (1 << pagecnt), _dump_buf_data);
6328 _dump_buf_data_order = pagecnt;
6329 memset(_dump_buf_data, 0,
6330 ((1 << PAGE_SHIFT) << pagecnt));
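6331 /* The dump buffer spans PAGE_SIZE << pagecnt bytes; the page order
6332  * is remembered in _dump_buf_data_order so the pages can be freed
6333  * at module unload. */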
6335 if (!_dump_buf_data_order)
6336 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6337 "9044 BLKGRD: ERROR unable to allocate "
6338 "memory for hexdump\n");
6340 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6341 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
6342 "\n", _dump_buf_data);
6343 if (!_dump_buf_dif) {
6346 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
6347 if (_dump_buf_dif) {
6348 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6349 "9046 BLKGRD: allocated %d pages for "
6350 "_dump_buf_dif at 0x%p\n",
6351 (1 << pagecnt), _dump_buf_dif);
6352 _dump_buf_dif_order = pagecnt;
6353 memset(_dump_buf_dif, 0,
6354 ((1 << PAGE_SHIFT) << pagecnt));
6359 if (!_dump_buf_dif_order)
6360 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6361 "9047 BLKGRD: ERROR unable to allocate "
6362 "memory for hexdump\n");
6364 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
6365 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
6370 * lpfc_post_init_setup - Perform necessary device post initialization setup.
6371 * @phba: pointer to lpfc hba data structure.
6373 * This routine is invoked to perform all the necessary post initialization
6374 * setup for the device.
6377 lpfc_post_init_setup(struct lpfc_hba *phba)
6379 struct Scsi_Host *shost;
6380 struct lpfc_adapter_event_header adapter_event;
6382 /* Get the default values for Model Name and Description */
6383 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
6386 * hba setup may have changed the hba_queue_depth so we need to
6387 * adjust the value of can_queue.
6389 shost = pci_get_drvdata(phba->pcidev);
6390 shost->can_queue = phba->cfg_hba_queue_depth - 10;
6391 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
6392 lpfc_setup_bg(phba, shost);
6394 lpfc_host_attrib_init(shost);
6396 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
6397 spin_lock_irq(shost->host_lock);
6398 lpfc_poll_start_timer(phba);
6399 spin_unlock_irq(shost->host_lock);
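6400 /* In this polled mode the FCP ring interrupt stays disabled and the
6401  * poll timer drives FCP completion processing instead. */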
6402 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6403 "0428 Perform SCSI scan\n");
6404 /* Send board arrival event to upper layer */
6405 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
6406 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
6407 fc_host_post_vendor_event(shost, fc_get_event_number(),
6408 sizeof(adapter_event),
6409 (char *) &adapter_event,
6415 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
6416 * @phba: pointer to lpfc hba data structure.
6418 * This routine is invoked to set up the PCI device memory space for device
6419 * with SLI-3 interface spec.
6423 * other values - error
6426 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
6428 struct pci_dev *pdev;
6429 unsigned long bar0map_len, bar2map_len;
6432 int error = -ENODEV;
6434 /* Obtain PCI device reference */
6438 pdev = phba->pcidev;
6440 /* Set the device DMA mask size */
6441 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6442 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6443 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6444 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
6449 /* Get the bus address of Bar0 and Bar2 and the number of bytes
6450 * required by each mapping.
6452 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6453 bar0map_len = pci_resource_len(pdev, 0);
6455 phba->pci_bar2_map = pci_resource_start(pdev, 2);
6456 bar2map_len = pci_resource_len(pdev, 2);
6458 /* Map HBA SLIM to a kernel virtual address. */
6459 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
6460 if (!phba->slim_memmap_p) {
6461 dev_printk(KERN_ERR, &pdev->dev,
6462 "ioremap failed for SLIM memory.\n");
6466 /* Map HBA Control Registers to a kernel virtual address. */
6467 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
6468 if (!phba->ctrl_regs_memmap_p) {
6469 dev_printk(KERN_ERR, &pdev->dev,
6470 "ioremap failed for HBA control registers.\n");
6471 goto out_iounmap_slim;
6474 /* Allocate memory for SLI-2 structures */
6475 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6476 &phba->slim2p.phys, GFP_KERNEL);
6477 if (!phba->slim2p.virt)
6480 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
6481 phba->mbox_ext = (phba->slim2p.virt +
6482 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
6483 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
6484 phba->IOCBs = (phba->slim2p.virt +
6485 offsetof(struct lpfc_sli2_slim, IOCBs));
6487 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
6488 lpfc_sli_hbq_size(),
6489 &phba->hbqslimp.phys,
6491 if (!phba->hbqslimp.virt)
6494 hbq_count = lpfc_sli_hbq_count();
6495 ptr = phba->hbqslimp.virt;
6496 for (i = 0; i < hbq_count; ++i) {
6497 phba->hbqs[i].hbq_virt = ptr;
6498 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
6499 ptr += (lpfc_hbq_defs[i]->entry_count *
6500 sizeof(struct lpfc_hbq_entry));
6502 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
6503 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
6505 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
6507 INIT_LIST_HEAD(&phba->rb_pend_list);
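6508 /* Cache kernel virtual addresses of the SLIM area and host registers. */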
6509 phba->MBslimaddr = phba->slim_memmap_p;
6510 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
6511 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
6512 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
6513 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
6518 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6519 phba->slim2p.virt, phba->slim2p.phys);
6521 iounmap(phba->ctrl_regs_memmap_p);
6523 iounmap(phba->slim_memmap_p);
6529 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
6530 * @phba: pointer to lpfc hba data structure.
6532 * This routine is invoked to unset the PCI device memory space for device
6533 * with SLI-3 interface spec.
6536 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6538 struct pci_dev *pdev;
6540 /* Obtain PCI device reference */
6544 pdev = phba->pcidev;
6546 /* Free coherent DMA memory allocated */
6547 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6548 phba->hbqslimp.virt, phba->hbqslimp.phys);
6549 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6550 phba->slim2p.virt, phba->slim2p.phys);
6552 /* I/O memory unmap */
6553 iounmap(phba->ctrl_regs_memmap_p);
6554 iounmap(phba->slim_memmap_p);
6560 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
6561 * @phba: pointer to lpfc hba data structure.
6563 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
6564 * done and check status.
6566 * Return 0 if successful, otherwise -ENODEV.
6569 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6571 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6572 struct lpfc_register reg_data;
6573 int i, port_error = 0;
6576 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6577 memset(&reg_data, 0, sizeof(reg_data));
6578 if (!phba->sli4_hba.PSMPHRregaddr)
6581 /* Wait up to 30 seconds for the SLI Port POST done and ready */
6582 for (i = 0; i < 3000; i++) {
6583 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6584 &portsmphr_reg.word0) ||
6585 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6586 /* Port has a fatal POST error, break out */
6587 port_error = -ENODEV;
6590 if (LPFC_POST_STAGE_PORT_READY ==
6591 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6597 * If there was a port error during POST, then don't proceed with
6598 * other register reads as the data may not be valid. Just exit.
6601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6602 "1408 Port Failed POST - portsmphr=0x%x, "
6603 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6604 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6605 portsmphr_reg.word0,
6606 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6607 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6608 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6609 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6610 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6611 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6612 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6613 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6615 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6616 "2534 Device Info: SLIFamily=0x%x, "
6617 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6618 "SLIHint_2=0x%x, FT=0x%x\n",
6619 bf_get(lpfc_sli_intf_sli_family,
6620 &phba->sli4_hba.sli_intf),
6621 bf_get(lpfc_sli_intf_slirev,
6622 &phba->sli4_hba.sli_intf),
6623 bf_get(lpfc_sli_intf_if_type,
6624 &phba->sli4_hba.sli_intf),
6625 bf_get(lpfc_sli_intf_sli_hint1,
6626 &phba->sli4_hba.sli_intf),
6627 bf_get(lpfc_sli_intf_sli_hint2,
6628 &phba->sli4_hba.sli_intf),
6629 bf_get(lpfc_sli_intf_func_type,
6630 &phba->sli4_hba.sli_intf));
6632 * Check for other Port errors during the initialization
6633 * process. Fail the load if the port did not come up
6634 * correctly.
6636 if_type = bf_get(lpfc_sli_intf_if_type,
6637 &phba->sli4_hba.sli_intf);
6639 case LPFC_SLI_INTF_IF_TYPE_0:
6640 phba->sli4_hba.ue_mask_lo =
6641 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6642 phba->sli4_hba.ue_mask_hi =
6643 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6645 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6647 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6648 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6649 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6651 "1422 Unrecoverable Error "
6652 "Detected during POST "
6653 "uerr_lo_reg=0x%x, "
6654 "uerr_hi_reg=0x%x, "
6655 "ue_mask_lo_reg=0x%x, "
6656 "ue_mask_hi_reg=0x%x\n",
6659 phba->sli4_hba.ue_mask_lo,
6660 phba->sli4_hba.ue_mask_hi);
6661 port_error = -ENODEV;
6664 case LPFC_SLI_INTF_IF_TYPE_2:
6665 /* Final checks. The port status should be clean. */
6666 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6668 (bf_get(lpfc_sliport_status_err, &reg_data) &&
6669 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6670 phba->work_status[0] =
6671 readl(phba->sli4_hba.u.if_type2.
6673 phba->work_status[1] =
6674 readl(phba->sli4_hba.u.if_type2.
6676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6677 "2888 Unrecoverable port error "
6678 "following POST: port status reg "
6679 "0x%x, port_smphr reg 0x%x, "
6680 "error 1=0x%x, error 2=0x%x\n",
6682 portsmphr_reg.word0,
6683 phba->work_status[0],
6684 phba->work_status[1]);
6685 port_error = -ENODEV;
6688 case LPFC_SLI_INTF_IF_TYPE_1:
6697 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
6698 * @phba: pointer to lpfc hba data structure.
6699 * @if_type: The SLI4 interface type getting configured.
6701 * This routine is invoked to set up SLI4 BAR0 PCI config space register
6705 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6708 case LPFC_SLI_INTF_IF_TYPE_0:
6709 phba->sli4_hba.u.if_type0.UERRLOregaddr =
6710 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6711 phba->sli4_hba.u.if_type0.UERRHIregaddr =
6712 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6713 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6714 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6715 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6716 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6717 phba->sli4_hba.SLIINTFregaddr =
6718 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6720 case LPFC_SLI_INTF_IF_TYPE_2:
6721 phba->sli4_hba.u.if_type2.ERR1regaddr =
6722 phba->sli4_hba.conf_regs_memmap_p +
6723 LPFC_CTL_PORT_ER1_OFFSET;
6724 phba->sli4_hba.u.if_type2.ERR2regaddr =
6725 phba->sli4_hba.conf_regs_memmap_p +
6726 LPFC_CTL_PORT_ER2_OFFSET;
6727 phba->sli4_hba.u.if_type2.CTRLregaddr =
6728 phba->sli4_hba.conf_regs_memmap_p +
6729 LPFC_CTL_PORT_CTL_OFFSET;
6730 phba->sli4_hba.u.if_type2.STATUSregaddr =
6731 phba->sli4_hba.conf_regs_memmap_p +
6732 LPFC_CTL_PORT_STA_OFFSET;
6733 phba->sli4_hba.SLIINTFregaddr =
6734 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6735 phba->sli4_hba.PSMPHRregaddr =
6736 phba->sli4_hba.conf_regs_memmap_p +
6737 LPFC_CTL_PORT_SEM_OFFSET;
6738 phba->sli4_hba.RQDBregaddr =
6739 phba->sli4_hba.conf_regs_memmap_p +
6740 LPFC_ULP0_RQ_DOORBELL;
6741 phba->sli4_hba.WQDBregaddr =
6742 phba->sli4_hba.conf_regs_memmap_p +
6743 LPFC_ULP0_WQ_DOORBELL;
6744 phba->sli4_hba.EQCQDBregaddr =
6745 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6746 phba->sli4_hba.MQDBregaddr =
6747 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6748 phba->sli4_hba.BMBXregaddr =
6749 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6751 case LPFC_SLI_INTF_IF_TYPE_1:
6753 dev_printk(KERN_ERR, &phba->pcidev->dev,
6754 "FATAL - unsupported SLI4 interface type - %d\n",
6761 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6762 * @phba: pointer to lpfc hba data structure.
6764 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6768 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6770 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6771 LPFC_SLIPORT_IF0_SMPHR;
6772 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6774 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6776 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6781 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6782 * @phba: pointer to lpfc hba data structure.
6783 * @vf: virtual function number
6785 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6786 * based on the given virtual function number, @vf.
6788 * Return 0 if successful, otherwise -ENODEV.
6791 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6793 if (vf > LPFC_VIR_FUNC_MAX)
6796 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6797 vf * LPFC_VFR_PAGE_SIZE +
6798 LPFC_ULP0_RQ_DOORBELL);
6799 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6800 vf * LPFC_VFR_PAGE_SIZE +
6801 LPFC_ULP0_WQ_DOORBELL);
6802 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6803 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6804 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6805 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6806 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6807 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
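6808 /* Each virtual function owns one LPFC_VFR_PAGE_SIZE page of doorbell
6809  * registers, so every address above is the VF's page base plus the
6810  * same fixed offsets the physical function uses. */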
6812 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
6813 * @phba: pointer to lpfc hba data structure.
6815 * This routine is invoked to create the bootstrap mailbox
6816 * region consistent with the SLI-4 interface spec. This
6817 * routine allocates all memory necessary to communicate
6818 * mailbox commands to the port and sets up all alignment
6819 * needs. No locks are expected to be held when calling
6824 * -ENOMEM - could not allocate memory.
6827 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6830 struct lpfc_dmabuf *dmabuf;
6831 struct dma_address *dma_address;
6835 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6840 * The bootstrap mailbox region consists of 2 parts
6841 * plus an alignment restriction of 16 bytes.
6843 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6844 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
6845 &dmabuf->phys, GFP_KERNEL);
6846 if (!dmabuf->virt) {
6852 * Initialize the bootstrap mailbox pointers now so that the register
6853 * operations are simple later. The mailbox dma address is required
6854 * to be 16-byte aligned. Also align the virtual memory as each
6855 * mailbox is copied into the bmbx mailbox region before issuing the
6856 * command to the port.
6858 phba->sli4_hba.bmbx.dmabuf = dmabuf;
6859 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6861 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6862 LPFC_ALIGN_16_BYTE);
6863 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6864 LPFC_ALIGN_16_BYTE);
6867 * Set the high and low physical addresses now. The SLI4 alignment
6868 * requirement is 16 bytes and the mailbox is posted to the port
6869 * as two 30-bit addresses. The other data is a bit marking whether
6870 * the 30-bit address is the high or low address.
6871 * Upcast bmbx aphys to 64bits so shift instruction compiles
6872 * clean on 32 bit machines.
6874 dma_address = &phba->sli4_hba.bmbx.dma_address;
6875 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6876 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6877 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6878 LPFC_BMBX_BIT1_ADDR_HI);
6880 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6881 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6882 LPFC_BMBX_BIT1_ADDR_LO);
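6883 /* Worked example with an assumed aligned aphys of 0x12345670: addr_lo
6884  * carries bits 33:4 (0x01234567) and addr_hi carries bits 63:34 (0x0),
6885  * each shifted left two bits with a tag bit marking high versus low. */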
6887 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6888 * @phba: pointer to lpfc hba data structure.
6890 * This routine is invoked to teardown the bootstrap mailbox
6891 * region and release all host resources. This routine requires
6892 * the caller to ensure all mailbox commands are recovered, no
6893 * additional mailbox commands are sent, and interrupts are disabled
6894 * before calling this routine.
6898 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6900 dma_free_coherent(&phba->pcidev->dev,
6901 phba->sli4_hba.bmbx.bmbx_size,
6902 phba->sli4_hba.bmbx.dmabuf->virt,
6903 phba->sli4_hba.bmbx.dmabuf->phys);
6905 kfree(phba->sli4_hba.bmbx.dmabuf);
6906 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6910 * lpfc_sli4_read_config - Get the config parameters.
6911 * @phba: pointer to lpfc hba data structure.
6913 * This routine is invoked to read the configuration parameters from the HBA.
6914 * The configuration parameters are used to set the base and maximum values
6915 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
6916 * allocation for the port.
6920 * -ENOMEM - No available memory
6921 * -EIO - The mailbox failed to complete successfully.
6924 lpfc_sli4_read_config(struct lpfc_hba *phba)
6927 struct lpfc_mbx_read_config *rd_config;
6928 union lpfc_sli4_cfg_shdr *shdr;
6929 uint32_t shdr_status, shdr_add_status;
6930 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6931 struct lpfc_rsrc_desc_fcfcoe *desc;
6933 uint16_t forced_link_speed;
6935 int length, i, rc = 0, rc2;
6937 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6940 "2011 Unable to allocate memory for issuing "
6941 "SLI_CONFIG_SPECIAL mailbox command\n");
6945 lpfc_read_config(phba, pmb);
6947 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6948 if (rc != MBX_SUCCESS) {
6949 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6950 "2012 Mailbox failed , mbxCmd x%x "
6951 "READ_CONFIG, mbxStatus x%x\n",
6952 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6953 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6956 rd_config = &pmb->u.mqe.un.rd_config;
6957 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6958 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6959 phba->sli4_hba.lnk_info.lnk_tp =
6960 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6961 phba->sli4_hba.lnk_info.lnk_no =
6962 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6963 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6964 "3081 lnk_type:%d, lnk_numb:%d\n",
6965 phba->sli4_hba.lnk_info.lnk_tp,
6966 phba->sli4_hba.lnk_info.lnk_no);
6968 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6969 "3082 Mailbox (x%x) returned ldv:x0\n",
6970 bf_get(lpfc_mqe_command, &pmb->u.mqe));
6971 phba->sli4_hba.extents_in_use =
6972 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6973 phba->sli4_hba.max_cfg_param.max_xri =
6974 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6975 phba->sli4_hba.max_cfg_param.xri_base =
6976 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6977 phba->sli4_hba.max_cfg_param.max_vpi =
6978 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6979 phba->sli4_hba.max_cfg_param.vpi_base =
6980 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6981 phba->sli4_hba.max_cfg_param.max_rpi =
6982 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6983 phba->sli4_hba.max_cfg_param.rpi_base =
6984 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6985 phba->sli4_hba.max_cfg_param.max_vfi =
6986 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6987 phba->sli4_hba.max_cfg_param.vfi_base =
6988 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6989 phba->sli4_hba.max_cfg_param.max_fcfi =
6990 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6991 phba->sli4_hba.max_cfg_param.max_eq =
6992 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6993 phba->sli4_hba.max_cfg_param.max_rq =
6994 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6995 phba->sli4_hba.max_cfg_param.max_wq =
6996 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6997 phba->sli4_hba.max_cfg_param.max_cq =
6998 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6999 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
7000 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
7001 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
7002 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
7003 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
7004 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
7005 phba->max_vports = phba->max_vpi;
7006 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7007 "2003 cfg params Extents? %d "
7013 phba->sli4_hba.extents_in_use,
7014 phba->sli4_hba.max_cfg_param.xri_base,
7015 phba->sli4_hba.max_cfg_param.max_xri,
7016 phba->sli4_hba.max_cfg_param.vpi_base,
7017 phba->sli4_hba.max_cfg_param.max_vpi,
7018 phba->sli4_hba.max_cfg_param.vfi_base,
7019 phba->sli4_hba.max_cfg_param.max_vfi,
7020 phba->sli4_hba.max_cfg_param.rpi_base,
7021 phba->sli4_hba.max_cfg_param.max_rpi,
7022 phba->sli4_hba.max_cfg_param.max_fcfi);
7028 /* Update link speed if forced link speed is supported */
7029 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7030 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7032 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
7033 if (forced_link_speed) {
7034 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
7036 switch (forced_link_speed) {
7038 phba->cfg_link_speed =
7039 LPFC_USER_LINK_SPEED_1G;
7042 phba->cfg_link_speed =
7043 LPFC_USER_LINK_SPEED_2G;
7046 phba->cfg_link_speed =
7047 LPFC_USER_LINK_SPEED_4G;
7050 phba->cfg_link_speed =
7051 LPFC_USER_LINK_SPEED_8G;
7053 case LINK_SPEED_10G:
7054 phba->cfg_link_speed =
7055 LPFC_USER_LINK_SPEED_10G;
7057 case LINK_SPEED_16G:
7058 phba->cfg_link_speed =
7059 LPFC_USER_LINK_SPEED_16G;
7061 case LINK_SPEED_32G:
7062 phba->cfg_link_speed =
7063 LPFC_USER_LINK_SPEED_32G;
7066 phba->cfg_link_speed =
7067 LPFC_USER_LINK_SPEED_AUTO;
7070 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7071 "0047 Unrecognized link "
7074 phba->cfg_link_speed =
7075 LPFC_USER_LINK_SPEED_AUTO;
7080 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
7081 length = phba->sli4_hba.max_cfg_param.max_xri -
7082 lpfc_sli4_get_els_iocb_cnt(phba);
7083 if (phba->cfg_hba_queue_depth > length) {
7084 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7085 "3361 HBA queue depth changed from %d to %d\n",
7086 phba->cfg_hba_queue_depth, length);
7087 phba->cfg_hba_queue_depth = length;
7090 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
7091 LPFC_SLI_INTF_IF_TYPE_2)
7094 /* get the pf# and vf# for SLI4 if_type 2 port */
7095 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
7096 sizeof(struct lpfc_sli4_cfg_mhdr));
7097 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
7098 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
7099 length, LPFC_SLI4_MBX_EMBED);
7101 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7102 shdr = (union lpfc_sli4_cfg_shdr *)
7103 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7104 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7105 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7106 if (rc2 || shdr_status || shdr_add_status) {
7107 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7108 "3026 Mailbox failed , mbxCmd x%x "
7109 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
7110 bf_get(lpfc_mqe_command, &pmb->u.mqe),
7111 bf_get(lpfc_mqe_status, &pmb->u.mqe));
7115 /* search for fc_fcoe resource descriptor */
7116 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
7118 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
7119 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
7120 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
7121 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
7122 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
7123 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
7126 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
7127 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
7128 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
7129 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
7130 phba->sli4_hba.iov.pf_number =
7131 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
7132 phba->sli4_hba.iov.vf_number =
7133 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
7138 if (i < LPFC_RSRC_DESC_MAX_NUM)
7139 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7140 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
7141 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
7142 phba->sli4_hba.iov.vf_number);
7144 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7145 "3028 GET_FUNCTION_CONFIG: failed to find "
7146 "Resrouce Descriptor:x%x\n",
7147 LPFC_RSRC_DESC_TYPE_FCFCOE);
7150 mempool_free(pmb, phba->mbox_mem_pool);
7155 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
7156 * @phba: pointer to lpfc hba data structure.
7158 * This routine is invoked to setup the port-side endian order when
7159 * the port if_type is 0. This routine has no function for other
7160 * if_types.
7164 * -ENOMEM - No available memory
7165 * -EIO - The mailbox failed to complete successfully.
7168 lpfc_setup_endian_order(struct lpfc_hba *phba)
7170 LPFC_MBOXQ_t *mboxq;
7171 uint32_t if_type, rc = 0;
7172 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
7173 HOST_ENDIAN_HIGH_WORD1};
7175 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7177 case LPFC_SLI_INTF_IF_TYPE_0:
7178 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7182 "0492 Unable to allocate memory for "
7183 "issuing SLI_CONFIG_SPECIAL mailbox "
7189 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
7190 * two words to contain special data values and no other data.
7192 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
7193 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
7194 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7195 if (rc != MBX_SUCCESS) {
7196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7197 "0493 SLI_CONFIG_SPECIAL mailbox "
7198 "failed with status x%x\n",
7202 mempool_free(mboxq, phba->mbox_mem_pool);
7204 case LPFC_SLI_INTF_IF_TYPE_2:
7205 case LPFC_SLI_INTF_IF_TYPE_1:
7213 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
7214 * @phba: pointer to lpfc hba data structure.
7216 * This routine is invoked to check the user settable queue counts for EQs and
7217 * CQs. After this routine is called the counts will be set to valid values that
7218 * adhere to the constraints of the system's interrupt vectors and the port's
7223 * -ENOMEM - No available memory
7226 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
7228 int cfg_fcp_io_channel;
7231 int fof_vectors = phba->cfg_fof ? 1 : 0;
7234 * Sanity check for configured queue parameters against the run-time
7238 /* Sanity check on HBA EQ parameters */
7239 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
7241 /* It doesn't make sense to have more io channels than online CPUs */
7242 for_each_present_cpu(cpu) {
7243 if (cpu_online(cpu))
7246 phba->sli4_hba.num_online_cpu = i;
7247 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7248 phba->sli4_hba.curr_disp_cpu = 0;
7250 if (i < cfg_fcp_io_channel) {
7251 lpfc_printf_log(phba,
7253 "3188 Reducing IO channels to match number of "
7254 "online CPUs: from %d to %d\n",
7255 cfg_fcp_io_channel, i);
7256 cfg_fcp_io_channel = i;
7259 if (cfg_fcp_io_channel + fof_vectors >
7260 phba->sli4_hba.max_cfg_param.max_eq) {
7261 if (phba->sli4_hba.max_cfg_param.max_eq <
7262 LPFC_FCP_IO_CHAN_MIN) {
7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7264 "2574 Not enough EQs (%d) from the "
7265 "pci function for supporting FCP "
7267 phba->sli4_hba.max_cfg_param.max_eq,
7268 phba->cfg_fcp_io_channel);
7271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7272 "2575 Reducing IO channels to match number of "
7273 "available EQs: from %d to %d\n",
7275 phba->sli4_hba.max_cfg_param.max_eq);
7276 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
7280 /* The actual number of FCP event queues adopted */
7281 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
7283 /* Get EQ depth from module parameter, fake the default for now */
7284 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7285 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
7287 /* Get CQ depth from module parameter, fake the default for now */
7288 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7289 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
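7290 /* Note the division of labor: this routine only validates and records
7291  * queue counts; lpfc_sli4_queue_create() allocates the queue memory and
7292  * lpfc_sli4_queue_setup() posts the queues to the port. */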
7297 * lpfc_sli4_queue_create - Create all the SLI4 queues
7298 * @phba: pointer to lpfc hba data structure.
7300 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
7301 * operation. For each SLI4 queue type, the parameters such as queue entry
7302 * count (queue depth) shall be taken from the module parameter. For now,
7303 * we just use some constant number as a placeholder.
7307 * -ENOMEM - No available memory
7308 * -EIO - The mailbox failed to complete successfully.
7311 lpfc_sli4_queue_create(struct lpfc_hba *phba)
7313 struct lpfc_queue *qdesc;
7318 * Create HBA Record arrays.
7320 if (!phba->cfg_fcp_io_channel)
7323 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
7324 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
7325 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
7326 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
7327 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
7328 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
7330 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
7331 phba->cfg_fcp_io_channel), GFP_KERNEL);
7332 if (!phba->sli4_hba.hba_eq) {
7333 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7334 "2576 Failed allocate memory for "
7335 "fast-path EQ record array\n");
7339 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
7340 phba->cfg_fcp_io_channel), GFP_KERNEL);
7341 if (!phba->sli4_hba.fcp_cq) {
7342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7343 "2577 Failed allocate memory for fast-path "
7344 "CQ record array\n");
7348 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
7349 phba->cfg_fcp_io_channel), GFP_KERNEL);
7350 if (!phba->sli4_hba.fcp_wq) {
7351 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7352 "2578 Failed allocate memory for fast-path "
7353 "WQ record array\n");
7358 * Since the first EQ can have multiple CQs associated with it,
7359 * this array is used to quickly see if we have a FCP fast-path
7360 * CQ match.
7362 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
7363 phba->cfg_fcp_io_channel), GFP_KERNEL);
7364 if (!phba->sli4_hba.fcp_cq_map) {
7365 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7366 "2545 Failed allocate memory for fast-path "
7372 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
7373 * how many EQs to create.
7375 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7378 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
7379 phba->sli4_hba.eq_ecount);
7381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7382 "0497 Failed allocate EQ (%d)\n", idx);
7385 phba->sli4_hba.hba_eq[idx] = qdesc;
7387 /* Create Fast Path FCP CQs */
7388 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7389 phba->sli4_hba.cq_ecount);
7391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7392 "0499 Failed allocate fast-path FCP "
7396 phba->sli4_hba.fcp_cq[idx] = qdesc;
7398 /* Create Fast Path FCP WQs */
7399 wqesize = (phba->fcp_embed_io) ?
7400 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
7401 qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
7402 phba->sli4_hba.wq_ecount);
7404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7405 "0503 Failed allocate fast-path FCP "
7409 phba->sli4_hba.fcp_wq[idx] = qdesc;
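7410 /* Each IO channel thus gets its own EQ/CQ/WQ triple; the WQE size is
7411  * doubled to 128 bytes when FCP commands are embedded in the WQE
7412  * (fcp_embed_io). */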
7414 * Create Slow Path Completion Queues (CQs)
7417 /* Create slow-path Mailbox Command Complete Queue */
7418 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7419 phba->sli4_hba.cq_ecount);
7421 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7422 "0500 Failed allocate slow-path mailbox CQ\n");
7425 phba->sli4_hba.mbx_cq = qdesc;
7427 /* Create slow-path ELS Complete Queue */
7428 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7429 phba->sli4_hba.cq_ecount);
7431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7432 "0501 Failed allocate slow-path ELS CQ\n");
7435 phba->sli4_hba.els_cq = qdesc;
7439 * Create Slow Path Work Queues (WQs)
7442 /* Create Mailbox Command Queue */
7444 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
7445 phba->sli4_hba.mq_ecount);
7447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7448 "0505 Failed allocate slow-path MQ\n");
7451 phba->sli4_hba.mbx_wq = qdesc;
7454 * Create ELS Work Queues
7457 /* Create slow-path ELS Work Queue */
7458 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
7459 phba->sli4_hba.wq_ecount);
7461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7462 "0504 Failed allocate slow-path ELS WQ\n");
7465 phba->sli4_hba.els_wq = qdesc;
7468 * Create Receive Queue (RQ)
7471 /* Create Receive Queue for header */
7472 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7473 phba->sli4_hba.rq_ecount);
7475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7476 "0506 Failed allocate receive HRQ\n");
7479 phba->sli4_hba.hdr_rq = qdesc;
7481 /* Create Receive Queue for data */
7482 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7483 phba->sli4_hba.rq_ecount);
7485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7486 "0507 Failed allocate receive DRQ\n");
7489 phba->sli4_hba.dat_rq = qdesc;
7491 /* Create the Queues needed for Flash Optimized Fabric operations */
7493 lpfc_fof_queue_create(phba);
7497 lpfc_sli4_queue_destroy(phba);
7502 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
7503 * @phba: pointer to lpfc hba data structure.
7505 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
7510 * -ENOMEM - No available memory
7511 * -EIO - The mailbox failed to complete successfully.
7514 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
7519 lpfc_fof_queue_destroy(phba);
7521 if (phba->sli4_hba.hba_eq != NULL) {
7522 /* Release HBA event queue */
7523 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7524 if (phba->sli4_hba.hba_eq[idx] != NULL) {
7525 lpfc_sli4_queue_free(
7526 phba->sli4_hba.hba_eq[idx]);
7527 phba->sli4_hba.hba_eq[idx] = NULL;
7530 kfree(phba->sli4_hba.hba_eq);
7531 phba->sli4_hba.hba_eq = NULL;
7534 if (phba->sli4_hba.fcp_cq != NULL) {
7535 /* Release FCP completion queue */
7536 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7537 if (phba->sli4_hba.fcp_cq[idx] != NULL) {
7538 lpfc_sli4_queue_free(
7539 phba->sli4_hba.fcp_cq[idx]);
7540 phba->sli4_hba.fcp_cq[idx] = NULL;
7543 kfree(phba->sli4_hba.fcp_cq);
7544 phba->sli4_hba.fcp_cq = NULL;
7547 if (phba->sli4_hba.fcp_wq != NULL) {
7548 /* Release FCP work queue */
7549 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7550 if (phba->sli4_hba.fcp_wq[idx] != NULL) {
7551 lpfc_sli4_queue_free(
7552 phba->sli4_hba.fcp_wq[idx]);
7553 phba->sli4_hba.fcp_wq[idx] = NULL;
7556 kfree(phba->sli4_hba.fcp_wq);
7557 phba->sli4_hba.fcp_wq = NULL;
7560 /* Release FCP CQ mapping array */
7561 if (phba->sli4_hba.fcp_cq_map != NULL) {
7562 kfree(phba->sli4_hba.fcp_cq_map);
7563 phba->sli4_hba.fcp_cq_map = NULL;
7566 /* Release mailbox command work queue */
7567 if (phba->sli4_hba.mbx_wq != NULL) {
7568 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
7569 phba->sli4_hba.mbx_wq = NULL;
7572 /* Release ELS work queue */
7573 if (phba->sli4_hba.els_wq != NULL) {
7574 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
7575 phba->sli4_hba.els_wq = NULL;
7578 /* Release unsolicited receive queue */
7579 if (phba->sli4_hba.hdr_rq != NULL) {
7580 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
7581 phba->sli4_hba.hdr_rq = NULL;
7583 if (phba->sli4_hba.dat_rq != NULL) {
7584 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
7585 phba->sli4_hba.dat_rq = NULL;
7588 /* Release ELS complete queue */
7589 if (phba->sli4_hba.els_cq != NULL) {
7590 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
7591 phba->sli4_hba.els_cq = NULL;
7594 /* Release mailbox command complete queue */
7595 if (phba->sli4_hba.mbx_cq != NULL) {
7596 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
7597 phba->sli4_hba.mbx_cq = NULL;
7604 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
7605 * @phba: pointer to lpfc hba data structure.
7607 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
7612 * -ENOMEM - No available memory
7613 * -EIO - The mailbox failed to complete successfully.
7616 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7618 struct lpfc_sli *psli = &phba->sli;
7619 struct lpfc_sli_ring *pring;
7621 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7622 int fcp_cq_index = 0;
7623 uint32_t shdr_status, shdr_add_status;
7624 union lpfc_sli4_cfg_shdr *shdr;
7625 LPFC_MBOXQ_t *mboxq;
7628 /* Check for dual-ULP support */
7629 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7632 "3249 Unable to allocate memory for "
7633 "QUERY_FW_CFG mailbox command\n");
7636 length = (sizeof(struct lpfc_mbx_query_fw_config) -
7637 sizeof(struct lpfc_sli4_cfg_mhdr));
7638 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7639 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
7640 length, LPFC_SLI4_MBX_EMBED);
7642 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7644 shdr = (union lpfc_sli4_cfg_shdr *)
7645 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7646 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7647 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7648 if (shdr_status || shdr_add_status || rc) {
7649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7650 "3250 QUERY_FW_CFG mailbox failed with status "
7651 "x%x add_status x%x, mbx status x%x\n",
7652 shdr_status, shdr_add_status, rc);
7653 if (rc != MBX_TIMEOUT)
7654 mempool_free(mboxq, phba->mbox_mem_pool);
7659 phba->sli4_hba.fw_func_mode =
7660 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7661 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7662 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7663 phba->sli4_hba.physical_port =
7664 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
7665 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7666 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7667 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
7668 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
7670 if (rc != MBX_TIMEOUT)
7671 mempool_free(mboxq, phba->mbox_mem_pool);
7674 * Set up HBA Event Queues (EQs)
7677 /* Set up HBA event queue */
7678 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7680 "3147 Fast-path EQs not allocated\n");
7684 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
7685 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
7686 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7687 "0522 Fast-path EQ (%d) not "
7688 "allocated\n", fcp_eqidx);
7690 goto out_destroy_hba_eq;
7692 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
7693 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
7695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7696 "0523 Failed setup of fast-path EQ "
7697 "(%d), rc = 0x%x\n", fcp_eqidx,
7699 goto out_destroy_hba_eq;
7701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7702 "2584 HBA EQ setup: "
7703 "queue[%d]-id=%d\n", fcp_eqidx,
7704 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
7707 /* Set up fast-path FCP Response Complete Queue */
7708 if (!phba->sli4_hba.fcp_cq) {
7709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7710 "3148 Fast-path FCP CQ array not "
7713 goto out_destroy_hba_eq;
7716 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
7717 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7719 "0526 Fast-path FCP CQ (%d) not "
7720 "allocated\n", fcp_cqidx);
7722 goto out_destroy_fcp_cq;
7724 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
7725 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
7727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7728 "0527 Failed setup of fast-path FCP "
7729 "CQ (%d), rc = 0x%x\n", fcp_cqidx,
7731 goto out_destroy_fcp_cq;
7734 /* Setup fcp_cq_map for fast lookup */
7735 phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
7736 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
7738 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7739 "2588 FCP CQ setup: cq[%d]-id=%d, "
7740 "parent seq[%d]-id=%d\n",
7742 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7744 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
7747 /* Set up fast-path FCP Work Queue */
7748 if (!phba->sli4_hba.fcp_wq) {
7749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7750 "3149 Fast-path FCP WQ array not "
7753 goto out_destroy_fcp_cq;
7756 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7757 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7759 "0534 Fast-path FCP WQ (%d) not "
7760 "allocated\n", fcp_wqidx);
7762 goto out_destroy_fcp_wq;
7764 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7765 phba->sli4_hba.fcp_cq[fcp_wqidx],
7768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7769 "0535 Failed setup of fast-path FCP "
7770 "WQ (%d), rc = 0x%x\n", fcp_wqidx,
7772 goto out_destroy_fcp_wq;
7775 /* Bind this WQ to the next FCP ring */
7776 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7777 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7778 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7780 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7781 "2591 FCP WQ setup: wq[%d]-id=%d, "
7782 "parent cq[%d]-id=%d\n",
7784 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7786 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7789 * Set up Complete Queues (CQs)
7792 /* Set up slow-path MBOX Complete Queue as the first CQ */
7793 if (!phba->sli4_hba.mbx_cq) {
7794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7795 "0528 Mailbox CQ not allocated\n");
7797 goto out_destroy_fcp_wq;
7799 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
7800 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
7802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7803 "0529 Failed setup of slow-path mailbox CQ: "
7804 "rc = 0x%x\n", (uint32_t)rc);
7805 goto out_destroy_fcp_wq;
7807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7808 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
7809 phba->sli4_hba.mbx_cq->queue_id,
7810 phba->sli4_hba.hba_eq[0]->queue_id);
7812 /* Set up slow-path ELS Complete Queue */
7813 if (!phba->sli4_hba.els_cq) {
7814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7815 "0530 ELS CQ not allocated\n");
7817 goto out_destroy_mbx_cq;
7819 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7820 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7823 "0531 Failed setup of slow-path ELS CQ: "
7824 "rc = 0x%x\n", (uint32_t)rc);
7825 goto out_destroy_mbx_cq;
7827 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7828 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7829 phba->sli4_hba.els_cq->queue_id,
7830 phba->sli4_hba.hba_eq[0]->queue_id);
7833 * Set up all the Work Queues (WQs)
7836 /* Set up Mailbox Command Queue */
7837 if (!phba->sli4_hba.mbx_wq) {
7838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7839 "0538 Slow-path MQ not allocated\n");
7841 goto out_destroy_els_cq;
7843 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7844 phba->sli4_hba.mbx_cq, LPFC_MBOX);
7846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7847 "0539 Failed setup of slow-path MQ: "
7849 goto out_destroy_els_cq;
7851 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7852 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
7853 phba->sli4_hba.mbx_wq->queue_id,
7854 phba->sli4_hba.mbx_cq->queue_id);
7856 /* Set up slow-path ELS Work Queue */
7857 if (!phba->sli4_hba.els_wq) {
7858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7859 "0536 Slow-path ELS WQ not allocated\n");
7861 goto out_destroy_mbx_wq;
7863 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
7864 phba->sli4_hba.els_cq, LPFC_ELS);
7866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7867 "0537 Failed setup of slow-path ELS WQ: "
7868 "rc = 0x%x\n", (uint32_t)rc);
7869 goto out_destroy_mbx_wq;
7872 /* Bind this WQ to the ELS ring */
7873 pring = &psli->ring[LPFC_ELS_RING];
7874 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7875 phba->sli4_hba.els_cq->pring = pring;
7877 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7878 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7879 phba->sli4_hba.els_wq->queue_id,
7880 phba->sli4_hba.els_cq->queue_id);
7883 * Create Receive Queue (RQ)
7885 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
7886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7887 "0540 Receive Queue not allocated\n");
7889 goto out_destroy_els_wq;
7892 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
7893 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
7895 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
7896 phba->sli4_hba.els_cq, LPFC_USOL);
7898 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7899 "0541 Failed setup of Receive Queue: "
7900 "rc = 0x%x\n", (uint32_t)rc);
7901 goto out_destroy_els_wq;
7904 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7905 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
7906 "parent cq-id=%d\n",
7907 phba->sli4_hba.hdr_rq->queue_id,
7908 phba->sli4_hba.dat_rq->queue_id,
7909 phba->sli4_hba.els_cq->queue_id);
7911 if (phba->cfg_fof) {
7912 rc = lpfc_fof_queue_setup(phba);
7914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7915 "0549 Failed setup of FOF Queues: "
7917 goto out_destroy_els_rq;
7922 * Configure EQ delay multiplier for interrupt coalescing using
7923 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
7925 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
7926 fcp_eqidx += LPFC_MAX_EQ_DELAY)
7927 lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
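7928 /* MODIFY_EQ_DELAY programs up to LPFC_MAX_EQ_DELAY event queues per
7929  * mailbox command, hence the loop stride of one command per batch
7930  * rather than one per EQ. */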
7931 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7933 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7935 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7937 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7939 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7941 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7942 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7944 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7945 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7947 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7948 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7954 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
7955 * @phba: pointer to lpfc hba data structure.
7957 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
7962 * -ENOMEM - No available memory
7963 * -EIO - The mailbox failed to complete successfully.
7966 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7970 /* Unset the queues created for Flash Optimized Fabric operations */
7972 lpfc_fof_queue_destroy(phba);
7973 /* Unset mailbox command work queue */
7974 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7975 /* Unset ELS work queue */
7976 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7977 /* Unset unsolicited receive queue */
7978 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7979 /* Unset FCP work queue */
7980 if (phba->sli4_hba.fcp_wq) {
7981 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7983 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7985 /* Unset mailbox command complete queue */
7986 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7987 /* Unset ELS complete queue */
7988 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7989 /* Unset FCP response complete queue */
7990 if (phba->sli4_hba.fcp_cq) {
7991 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7993 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7995 /* Unset fast-path event queue */
7996 if (phba->sli4_hba.hba_eq) {
7997 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7999 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
8004 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
8005 * @phba: pointer to lpfc hba data structure.
8007 * This routine is invoked to allocate and set up a pool of completion queue
8008 * events. The body of the completion queue event is a completion queue entry
8009 * CQE. For now, this pool is used for the interrupt service routine to queue
8010 * the following HBA completion queue events for the worker thread to process:
8011 * - Mailbox asynchronous events
8012 * - Receive queue completion unsolicited events
8013 * Later, this can be used for all the slow-path events.
8017 * -ENOMEM - No available memory
8020 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
8022 struct lpfc_cq_event *cq_event;
8025 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
8026 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
8028 goto out_pool_create_fail;
8029 list_add_tail(&cq_event->list,
8030 &phba->sli4_hba.sp_cqe_event_pool);
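8031 /* The pool is sized at four events per CQ entry, presumably to give
8032  * the interrupt path headroom while the worker thread drains events
8033  * queued earlier. */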
8034 out_pool_create_fail:
8035 lpfc_sli4_cq_event_pool_destroy(phba);
8040 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
8041 * @phba: pointer to lpfc hba data structure.
8043 * This routine is invoked to free the pool of completion queue events at
8044 * driver unload time. Note that, it is the responsibility of the driver
8045 * cleanup routine to free all the outstanding completion-queue events
8046 * allocated from this pool back into the pool before invoking this routine
8047 * to destroy the pool.
8050 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
8052 struct lpfc_cq_event *cq_event, *next_cq_event;
8054 list_for_each_entry_safe(cq_event, next_cq_event,
8055 &phba->sli4_hba.sp_cqe_event_pool, list) {
8056 list_del(&cq_event->list);
8057 kfree(cq_event);
8062 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
8063 * @phba: pointer to lpfc hba data structure.
8065 * This routine is the lock-free version of the API invoked to allocate a
8066 * completion-queue event from the free pool.
8068 * Return: Pointer to the newly allocated completion-queue event if
8069 * successful, NULL otherwise.
8071 struct lpfc_cq_event *
8072 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
8074 struct lpfc_cq_event *cq_event = NULL;
8076 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
8077 struct lpfc_cq_event, list);
8078 return cq_event;
8082 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
8083 * @phba: pointer to lpfc hba data structure.
8085 * This routine is the locked version of the API invoked to allocate a
8086 * completion-queue event from the free pool.
8088 * Return: Pointer to the newly allocated completion-queue event if
8089 * successful, NULL otherwise.
8091 struct lpfc_cq_event *
8092 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
8094 struct lpfc_cq_event *cq_event;
8095 unsigned long iflags;
8097 spin_lock_irqsave(&phba->hbalock, iflags);
8098 cq_event = __lpfc_sli4_cq_event_alloc(phba);
8099 spin_unlock_irqrestore(&phba->hbalock, iflags);
8100 return cq_event;
8104 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
8105 * @phba: pointer to lpfc hba data structure.
8106 * @cq_event: pointer to the completion queue event to be freed.
8108 * This routine is the lock-free version of the API invoked to release a
8109 * completion-queue event back into the free pool.
8112 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
8113 struct lpfc_cq_event *cq_event)
8115 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
8119 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
8120 * @phba: pointer to lpfc hba data structure.
8121 * @cq_event: pointer to the completion queue event to be freed.
8123 * This routine is the locked version of the API invoked to release a
8124 * completion-queue event back into the free pool.
8127 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
8128 struct lpfc_cq_event *cq_event)
8130 unsigned long iflags;
8131 spin_lock_irqsave(&phba->hbalock, iflags);
8132 __lpfc_sli4_cq_event_release(phba, cq_event);
8133 spin_unlock_irqrestore(&phba->hbalock, iflags);
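/*
 * Illustrative usage of the cq_event pool (editorial sketch, not part of
 * the original driver source; some_work_queue is hypothetical). An
 * interrupt handler takes an event from the pool, fills it, and queues it
 * for the worker thread, which releases it when done:
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return;			(pool exhausted)
 *	memcpy(&cq_event->cqe, entry, size);
 *	list_add_tail(&cq_event->list, &some_work_queue);
 *	...
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 *
 * The __lpfc_* variants perform the same list operations but assume the
 * caller already holds phba->hbalock.
 */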
8137 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
8138 * @phba: pointer to lpfc hba data structure.
8140 * This routine frees all the pending completion-queue events back into
8141 * the free pool for device reset.
8144 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
8147 struct lpfc_cq_event *cqe;
8148 unsigned long iflags;
8150 /* Retrieve all the pending WCQEs from pending WCQE lists */
8151 spin_lock_irqsave(&phba->hbalock, iflags);
8152 /* Pending FCP XRI abort events */
8153 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8155 /* Pending ELS XRI abort events */
8156 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8158 /* Pending async events */
8159 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
8161 spin_unlock_irqrestore(&phba->hbalock, iflags);
8163 while (!list_empty(&cqelist)) {
8164 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
8165 lpfc_sli4_cq_event_release(phba, cqe);
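/*
 * The drain above uses the standard splice-then-free idiom (editorial
 * restatement, not new driver logic): move every pending event to a
 * private list while holding the lock, then release the events one by
 * one without holding it:
 *
 *	LIST_HEAD(tmp);
 *	spin_lock_irqsave(&lock, flags);
 *	list_splice_init(&shared_list, &tmp);
 *	spin_unlock_irqrestore(&lock, flags);
 *	while (!list_empty(&tmp))
 *		... remove head and free ...
 */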
8170 * lpfc_pci_function_reset - Reset pci function.
8171 * @phba: pointer to lpfc hba data structure.
8173 * This routine is invoked to request a PCI function reset. It destroys
8174 * all resources assigned to the PCI function that originates this request.
8178 * -ENOMEM - No available memory
8179 * -EIO - The mailbox failed to complete successfully.
8182 lpfc_pci_function_reset(struct lpfc_hba *phba)
8184 LPFC_MBOXQ_t *mboxq;
8185 uint32_t rc = 0, if_type;
8186 uint32_t shdr_status, shdr_add_status;
8188 uint32_t port_reset = 0;
8189 union lpfc_sli4_cfg_shdr *shdr;
8190 struct lpfc_register reg_data;
8193 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8195 case LPFC_SLI_INTF_IF_TYPE_0:
8196 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8200 "0494 Unable to allocate memory for "
8201 "issuing SLI_FUNCTION_RESET mailbox "
8206 /* Setup PCI function reset mailbox-ioctl command */
8207 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8208 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
8209 LPFC_SLI4_MBX_EMBED);
8210 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8211 shdr = (union lpfc_sli4_cfg_shdr *)
8212 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
8213 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
8214 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
8216 if (rc != MBX_TIMEOUT)
8217 mempool_free(mboxq, phba->mbox_mem_pool);
8218 if (shdr_status || shdr_add_status || rc) {
8219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8220 "0495 SLI_FUNCTION_RESET mailbox "
8221 "failed with status x%x add_status x%x,"
8222 " mbx status x%x\n",
8223 shdr_status, shdr_add_status, rc);
8227 case LPFC_SLI_INTF_IF_TYPE_2:
8230 * Poll the Port Status Register and wait for RDY for
8231 * up to 30 seconds (1500 polls at 20 ms each). If the
8232 * port doesn't respond, treat it as an error.
8234 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
8235 if (lpfc_readl(phba->sli4_hba.u.if_type2.
8236 STATUSregaddr, &reg_data.word0)) {
8237 rc = -ENODEV;
8238 goto out;
8239 }
8240 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
8241 break;
8242 msleep(20);
8243 }
8245 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
8246 phba->work_status[0] = readl(
8247 phba->sli4_hba.u.if_type2.ERR1regaddr);
8248 phba->work_status[1] = readl(
8249 phba->sli4_hba.u.if_type2.ERR2regaddr);
8250 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8251 "2890 Port not ready, port status reg "
8252 "0x%x error 1=0x%x, error 2=0x%x\n",
8254 phba->work_status[0],
8255 phba->work_status[1]);
8262 * Reset the port now
8265 bf_set(lpfc_sliport_ctrl_end, &reg_data,
8266 LPFC_SLIPORT_LITTLE_ENDIAN);
8267 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
8268 LPFC_SLIPORT_INIT_PORT);
8269 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
8270 CTRLregaddr);
8271 /* flush */
8272 pci_read_config_word(phba->pcidev,
8273 PCI_DEVICE_ID, &devid);
8278 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
8284 case LPFC_SLI_INTF_IF_TYPE_1:
8289 out:
8290 /* Catch the not-ready port failure after a port reset. */
8292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8293 "3317 HBA not functional: IP Reset Failed "
8294 "try: echo fw_reset > board_mode\n");
8302 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
8303 * @phba: pointer to lpfc hba data structure.
8305 * This routine is invoked to set up the PCI device memory space for device
8306 * with SLI-4 interface spec.
8310 * other values - error
8313 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
8315 struct pci_dev *pdev;
8316 unsigned long bar0map_len, bar1map_len, bar2map_len;
8317 int error = -ENODEV;
8320 /* Obtain PCI device reference */
8324 pdev = phba->pcidev;
8326 /* Set the device DMA mask size */
8327 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
8328 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
8329 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
8330 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
8336 * The BARs and register set definitions and offset locations are
8337 * dependent on the if_type.
8339 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
8340 &phba->sli4_hba.sli_intf.word0)) {
8344 /* There is no SLI3 failback for SLI4 devices. */
8345 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
8346 LPFC_SLI_INTF_VALID) {
8347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8348 "2894 SLI_INTF reg contents invalid "
8349 "sli_intf reg 0x%x\n",
8350 phba->sli4_hba.sli_intf.word0);
8354 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8356 * Get the bus address of SLI4 device Bar regions and the
8357 * number of bytes required by each mapping. The mapping of the
8358 * particular PCI BAR regions depends on the type of
8361 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
8362 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
8363 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
8366 * Map SLI4 PCI Config Space Register base to a kernel virtual
8369 phba->sli4_hba.conf_regs_memmap_p =
8370 ioremap(phba->pci_bar0_map, bar0map_len);
8371 if (!phba->sli4_hba.conf_regs_memmap_p) {
8372 dev_printk(KERN_ERR, &pdev->dev,
8373 "ioremap failed for SLI4 PCI config "
8377 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
8378 /* Set up BAR0 PCI config space register memory map */
8379 lpfc_sli4_bar0_register_memmap(phba, if_type);
8381 phba->pci_bar0_map = pci_resource_start(pdev, 1);
8382 bar0map_len = pci_resource_len(pdev, 1);
8383 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8384 dev_printk(KERN_ERR, &pdev->dev,
8385 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
8388 phba->sli4_hba.conf_regs_memmap_p =
8389 ioremap(phba->pci_bar0_map, bar0map_len);
8390 if (!phba->sli4_hba.conf_regs_memmap_p) {
8391 dev_printk(KERN_ERR, &pdev->dev,
8392 "ioremap failed for SLI4 PCI config "
8396 lpfc_sli4_bar0_register_memmap(phba, if_type);
8399 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8400 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
8402 * Map SLI4 if type 0 HBA Control Register base to a kernel
8403 * virtual address and setup the registers.
8405 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
8406 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
8407 phba->sli4_hba.ctrl_regs_memmap_p =
8408 ioremap(phba->pci_bar1_map, bar1map_len);
8409 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
8410 dev_printk(KERN_ERR, &pdev->dev,
8411 "ioremap failed for SLI4 HBA control registers.\n");
8412 goto out_iounmap_conf;
8414 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
8415 lpfc_sli4_bar1_register_memmap(phba);
8418 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
8419 (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
8421 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
8422 * virtual address and setup the registers.
8424 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
8425 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
8426 phba->sli4_hba.drbl_regs_memmap_p =
8427 ioremap(phba->pci_bar2_map, bar2map_len);
8428 if (!phba->sli4_hba.drbl_regs_memmap_p) {
8429 dev_printk(KERN_ERR, &pdev->dev,
8430 "ioremap failed for SLI4 HBA doorbell registers.\n");
8431 goto out_iounmap_ctrl;
8433 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
8434 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8436 goto out_iounmap_all;
8442 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8444 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8446 iounmap(phba->sli4_hba.conf_regs_memmap_p);
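/*
 * Editorial sketch of the BAR map/unmap pairing used above (assumes a
 * valid struct pci_dev *pdev and a BAR index; illustrative only):
 *
 *	phys = pci_resource_start(pdev, bar);
 *	len = pci_resource_len(pdev, bar);
 *	regs = ioremap(phys, len);
 *	if (!regs)
 *		goto out_unmap_previous_bars;
 *	...
 *	iounmap(regs);
 *
 * Every successfully mapped BAR must be unmapped on any later failure,
 * which is exactly what the out_iounmap_* labels implement.
 */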
8452 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
8453 * @phba: pointer to lpfc hba data structure.
8455 * This routine is invoked to unset the PCI device memory space for device
8456 * with SLI-4 interface spec.
8459 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
8462 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8465 case LPFC_SLI_INTF_IF_TYPE_0:
8466 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8467 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8468 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8470 case LPFC_SLI_INTF_IF_TYPE_2:
8471 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8473 case LPFC_SLI_INTF_IF_TYPE_1:
8475 dev_printk(KERN_ERR, &phba->pcidev->dev,
8476 "FATAL - unsupported SLI4 interface type - %d\n",
8483 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
8484 * @phba: pointer to lpfc hba data structure.
8486 * This routine is invoked to enable the MSI-X interrupt vectors to device
8487 * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
8488 * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
8489 * once invoked, enables either all or nothing, depending on the current
8490 * availability of PCI vector resources. The device driver is responsible
8491 * for calling the individual request_irq() to register each MSI-X vector
8492 * with an interrupt handler, which is done in this function. Note that
8493 * later when device is unloading, the driver should always call free_irq()
8494 * on all MSI-X vectors it has done request_irq() on before calling
8495 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the
8496 * device will be left with MSI-X enabled, leaking its vectors.
8500 * other values - error
8503 lpfc_sli_enable_msix(struct lpfc_hba *phba)
8508 /* Set up MSI-X multi-message vectors */
8509 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8510 phba->msix_entries[i].entry = i;
8512 /* Configure MSI-X capability structure */
8513 rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
8516 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8517 "0420 PCI enable MSI-X failed (%d)\n", rc);
8520 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8521 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8522 "0477 MSI-X entry[%d]: vector=x%x "
8524 phba->msix_entries[i].vector,
8525 phba->msix_entries[i].entry);
8527 * Assign MSI-X vectors to interrupt handlers
8530 /* vector-0 is associated to slow-path handler */
8531 rc = request_irq(phba->msix_entries[0].vector,
8532 &lpfc_sli_sp_intr_handler, 0,
8533 LPFC_SP_DRIVER_HANDLER_NAME, phba);
8535 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8536 "0421 MSI-X slow-path request_irq failed "
8541 /* vector-1 is associated to fast-path handler */
8542 rc = request_irq(phba->msix_entries[1].vector,
8543 &lpfc_sli_fp_intr_handler, 0,
8544 LPFC_FP_DRIVER_HANDLER_NAME, phba);
8547 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8548 "0429 MSI-X fast-path request_irq failed "
8554 * Configure HBA MSI-X attention conditions to messages
8556 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8561 "0474 Unable to allocate memory for issuing "
8562 "MBOX_CONFIG_MSI command\n");
8565 rc = lpfc_config_msi(phba, pmb);
8568 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8569 if (rc != MBX_SUCCESS) {
8570 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
8571 "0351 Config MSI mailbox command failed, "
8572 "mbxCmd x%x, mbxStatus x%x\n",
8573 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
8577 /* Free memory allocated for mailbox command */
8578 mempool_free(pmb, phba->mbox_mem_pool);
8582 /* Free memory allocated for mailbox command */
8583 mempool_free(pmb, phba->mbox_mem_pool);
8586 /* free the irq already requested */
8587 free_irq(phba->msix_entries[1].vector, phba);
8590 /* free the irq already requested */
8591 free_irq(phba->msix_entries[0].vector, phba);
8594 /* Unconfigure MSI-X capability structure */
8595 pci_disable_msix(phba->pcidev);
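/*
 * Editorial note: pci_enable_msix_exact() is all-or-nothing, so the
 * caller never has to cope with a partial grant (illustrative sketch):
 *
 *	rc = pci_enable_msix_exact(pdev, entries, nvec);
 *	if (rc)
 *		return rc;	(no vectors were enabled)
 *	(exactly nvec vectors are now usable)
 */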
8602 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
8603 * @phba: pointer to lpfc hba data structure.
8605 * This routine is invoked to release the MSI-X vectors and then disable the
8606 * MSI-X interrupt mode to device with SLI-3 interface spec.
8609 lpfc_sli_disable_msix(struct lpfc_hba *phba)
8613 /* Free up MSI-X multi-message vectors */
8614 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8615 free_irq(phba->msix_entries[i].vector, phba);
8617 pci_disable_msix(phba->pcidev);
8623 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
8624 * @phba: pointer to lpfc hba data structure.
8626 * This routine is invoked to enable the MSI interrupt mode to device with
8627 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
8628 * enable the MSI vector. The device driver is responsible for calling the
8629 * request_irq() to register the MSI vector with an interrupt handler, which
8630 * is done in this function.
8634 * other values - error
8637 lpfc_sli_enable_msi(struct lpfc_hba *phba)
8641 rc = pci_enable_msi(phba->pcidev);
8643 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8644 "0462 PCI enable MSI mode success.\n");
8646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8647 "0471 PCI enable MSI mode failed (%d)\n", rc);
8651 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8652 0, LPFC_DRIVER_NAME, phba);
8654 pci_disable_msi(phba->pcidev);
8655 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8656 "0478 MSI request_irq failed (%d)\n", rc);
8662 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
8663 * @phba: pointer to lpfc hba data structure.
8665 * This routine is invoked to disable the MSI interrupt mode to device with
8666 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
8667 * done request_irq() on before calling pci_disable_msi(). Failure to do so
8668 * results in a BUG_ON() and the device will be left with MSI enabled,
8669 * leaking its vector.
8672 lpfc_sli_disable_msi(struct lpfc_hba *phba)
8674 free_irq(phba->pcidev->irq, phba);
8675 pci_disable_msi(phba->pcidev);
8680 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
8681 * @phba: pointer to lpfc hba data structure.
8683 * This routine is invoked to enable device interrupt and associate driver's
8684 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
8685 * spec. Depending on the interrupt mode configured for the driver, the driver
8686 * will try to fall back from the configured interrupt mode to an interrupt
8687 * mode which is supported by the platform, kernel, and device in the order
8689 * MSI-X -> MSI -> IRQ.
8693 * other values - error
8696 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8698 uint32_t intr_mode = LPFC_INTR_ERROR;
8701 if (cfg_mode == 2) {
8702 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
8703 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
8705 /* Now, try to enable MSI-X interrupt mode */
8706 retval = lpfc_sli_enable_msix(phba);
8708 /* Indicate initialization to MSI-X mode */
8709 phba->intr_type = MSIX;
8715 /* Fallback to MSI if MSI-X initialization failed */
8716 if (cfg_mode >= 1 && phba->intr_type == NONE) {
8717 retval = lpfc_sli_enable_msi(phba);
8719 /* Indicate initialization to MSI mode */
8720 phba->intr_type = MSI;
8725 /* Fallback to INTx if both MSI-X/MSI initialization failed */
8726 if (phba->intr_type == NONE) {
8727 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8728 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8730 /* Indicate initialization to INTx mode */
8731 phba->intr_type = INTx;
8739 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
8740 * @phba: pointer to lpfc hba data structure.
8742 * This routine is invoked to disable device interrupt and disassociate the
8743 * driver's interrupt handler(s) from interrupt vector(s) to device with
8744 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
8745 * release the interrupt vector(s) for the message signaled interrupt.
8748 lpfc_sli_disable_intr(struct lpfc_hba *phba)
8750 /* Disable the currently initialized interrupt mode */
8751 if (phba->intr_type == MSIX)
8752 lpfc_sli_disable_msix(phba);
8753 else if (phba->intr_type == MSI)
8754 lpfc_sli_disable_msi(phba);
8755 else if (phba->intr_type == INTx)
8756 free_irq(phba->pcidev->irq, phba);
8758 /* Reset interrupt management states */
8759 phba->intr_type = NONE;
8760 phba->sli.slistat.sli_intr = 0;
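/*
 * Editorial sketch of how a caller drives the MSI-X -> MSI -> INTx
 * fallback implemented above (this is what the probe path later in this
 * file does; out_fail is a hypothetical label):
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		goto out_fail;
 *	phba->intr_mode = intr_mode;
 */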
8766 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
8767 * @phba: pointer to lpfc hba data structure.
8769 * Find next available CPU to use for IRQ to CPU affinity.
8772 lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
8774 struct lpfc_vector_map_info *cpup;
8777 cpup = phba->sli4_hba.cpu_map;
8778 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8779 /* CPU must be online */
8780 if (cpu_online(cpu)) {
8781 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8782 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
8783 (cpup->phys_id == phys_id)) {
8791 * If we get here, we have used ALL CPUs for the specific
8792 * phys_id. Now we need to clear out lpfc_used_cpu and start
8796 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8797 if (lpfc_used_cpu[cpu] == phys_id)
8798 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
8801 cpup = phba->sli4_hba.cpu_map;
8802 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8803 /* CPU must be online */
8804 if (cpu_online(cpu)) {
8805 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8806 (cpup->phys_id == phys_id)) {
8812 return LPFC_VECTOR_MAP_EMPTY;
8816 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
8817 * @phba: pointer to lpfc hba data structure.
8818 * @vectors: number of HBA vectors
8820 * Affinitize MSI-X IRQ vectors to CPUs. Try to equally spread vector
8821 * affinitization across multiple physical CPUs (NUMA nodes).
8822 * In addition, this routine will assign an IO channel for each CPU
8823 * to use when issuing I/Os.
8826 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8828 int i, idx, saved_chann, used_chann, cpu, phys_id;
8829 int max_phys_id, min_phys_id;
8830 int num_io_channel, first_cpu, chan;
8831 struct lpfc_vector_map_info *cpup;
8833 struct cpuinfo_x86 *cpuinfo;
8835 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
8837 /* If there is no mapping, just return */
8838 if (!phba->cfg_fcp_cpu_map)
8841 /* Init cpu_map array */
8842 memset(phba->sli4_hba.cpu_map, 0xff,
8843 (sizeof(struct lpfc_vector_map_info) *
8844 phba->sli4_hba.num_present_cpu));
8850 first_cpu = LPFC_VECTOR_MAP_EMPTY;
8852 /* Update CPU map with physical id and core id of each CPU */
8853 cpup = phba->sli4_hba.cpu_map;
8854 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8856 cpuinfo = &cpu_data(cpu);
8857 cpup->phys_id = cpuinfo->phys_proc_id;
8858 cpup->core_id = cpuinfo->cpu_core_id;
8860 /* No distinction between CPUs for other platforms */
8865 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8866 "3328 CPU physid %d coreid %d\n",
8867 cpup->phys_id, cpup->core_id);
8869 if (cpup->phys_id > max_phys_id)
8870 max_phys_id = cpup->phys_id;
8871 if (cpup->phys_id < min_phys_id)
8872 min_phys_id = cpup->phys_id;
8876 phys_id = min_phys_id;
8877 /* Now associate the HBA vectors with specific CPUs */
8878 for (idx = 0; idx < vectors; idx++) {
8879 cpup = phba->sli4_hba.cpu_map;
8880 cpu = lpfc_find_next_cpu(phba, phys_id);
8881 if (cpu == LPFC_VECTOR_MAP_EMPTY) {
8883 /* Try for all phys_id's */
8884 for (i = 1; i < max_phys_id; i++) {
8885 phys_id++;
8886 if (phys_id > max_phys_id)
8887 phys_id = min_phys_id;
8888 cpu = lpfc_find_next_cpu(phba, phys_id);
8889 if (cpu == LPFC_VECTOR_MAP_EMPTY)
8894 /* Use round robin for scheduling */
8895 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
8897 cpup = phba->sli4_hba.cpu_map;
8898 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
8899 cpup->channel_id = chan;
8902 if (chan >= phba->cfg_fcp_io_channel)
8906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8907 "3329 Cannot set affinity:"
8908 "Error mapping vector %d (%d)\n",
8914 if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8915 lpfc_used_cpu[cpu] = phys_id;
8917 /* Associate vector with selected CPU */
8918 cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8920 /* Associate IO channel with selected CPU */
8921 cpup->channel_id = idx;
8924 if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8927 /* Now affinitize to the selected CPU */
8928 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8929 vector, get_cpu_mask(cpu));
8931 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8932 "3330 Set Affinity: CPU %d channel %d "
8934 cpu, cpup->channel_id,
8935 phba->sli4_hba.msix_entries[idx].vector, i);
8937 /* Spread vector mapping across multiple physical CPU nodes */
8938 phys_id++;
8939 if (phys_id > max_phys_id)
8940 phys_id = min_phys_id;
8944 * Finally fill in the IO channel for any remaining CPUs.
8945 * At this point, all IO channels have been assigned to a specific
8946 * MSIx vector, mapped to a specific CPU.
8947 * Base the remaining IO channel assignment on IO channels already
8948 * assigned to other CPUs on the same phys_id.
8950 for (i = min_phys_id; i <= max_phys_id; i++) {
8952 * If there are no io channels already mapped to
8953 * this phys_id, just round-robin through the io_channels.
8954 * Setup chann[] for round robin.
8956 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8963 * First build a list of IO channels already assigned
8964 * to this phys_id before reassigning the same IO
8965 * channels to the remaining CPUs.
8967 cpup = phba->sli4_hba.cpu_map;
8970 for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8972 if (cpup->phys_id == i) {
8974 * Save any IO channels that are
8975 * already mapped to this phys_id.
8977 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8979 LPFC_FCP_IO_CHAN_MAX) {
8980 chann[saved_chann] =
8987 /* See if we are using round-robin */
8988 if (saved_chann == 0)
8990 phba->cfg_fcp_io_channel;
8992 /* Associate next IO channel with CPU */
8993 cpup->channel_id = chann[used_chann];
8996 if (used_chann == saved_chann)
8999 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9000 "3331 Set IO_CHANN "
9001 "CPU %d channel %d\n",
9002 idx, cpup->channel_id);
9006 if (cpu >= phba->sli4_hba.num_present_cpu) {
9007 cpup = phba->sli4_hba.cpu_map;
9015 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
9016 cpup = phba->sli4_hba.cpu_map;
9017 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
9018 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
9019 cpup->channel_id = 0;
9022 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9023 "3332 Assign IO_CHANN "
9024 "CPU %d channel %d\n",
9025 idx, cpup->channel_id);
9032 if (num_io_channel != phba->sli4_hba.num_present_cpu)
9033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9034 "3333 Set affinity mismatch:"
9035 "%d chann != %d cpus: %d vectors\n",
9036 num_io_channel, phba->sli4_hba.num_present_cpu,
9039 /* Enable using cpu affinity for scheduling */
9040 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
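/*
 * Editorial sketch of the affinity-hint pairing used here: each MSI-X
 * vector is hinted to one CPU, and the hint must be cleared before the
 * IRQ is freed (see lpfc_sli4_disable_msix() below):
 *
 *	irq_set_affinity_hint(vector, get_cpu_mask(cpu));
 *	...
 *	irq_set_affinity_hint(vector, NULL);
 *	free_irq(vector, dev_id);
 */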
9046 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
9047 * @phba: pointer to lpfc hba data structure.
9049 * This routine is invoked to enable the MSI-X interrupt vectors to device
9050 * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
9051 * is called to enable the MSI-X vectors. The device driver is responsible
9052 * for calling the individual request_irq() to register each MSI-X vector
9053 * with an interrupt handler, which is done in this function. Note that
9054 * later when device is unloading, the driver should always call free_irq()
9055 * on all MSI-X vectors it has done request_irq() on before calling
9056 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the
9057 * device will be left with MSI-X enabled, leaking its vectors.
9061 * other values - error
9064 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
9066 int vectors, rc, index;
9068 /* Set up MSI-X multi-message vectors */
9069 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
9070 phba->sli4_hba.msix_entries[index].entry = index;
9072 /* Configure MSI-X capability structure */
9073 vectors = phba->cfg_fcp_io_channel;
9074 if (phba->cfg_fof) {
9075 phba->sli4_hba.msix_entries[index].entry = index;
9078 rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
9081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9082 "0484 PCI enable MSI-X failed (%d)\n", rc);
9087 /* Log MSI-X vector assignment */
9088 for (index = 0; index < vectors; index++)
9089 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9090 "0489 MSI-X entry[%d]: vector=x%x "
9091 "message=%d\n", index,
9092 phba->sli4_hba.msix_entries[index].vector,
9093 phba->sli4_hba.msix_entries[index].entry);
9095 /* Assign MSI-X vectors to interrupt handlers */
9096 for (index = 0; index < vectors; index++) {
9097 memset(&phba->sli4_hba.handler_name[index], 0, 16);
9098 snprintf((char *)&phba->sli4_hba.handler_name[index],
9099 LPFC_SLI4_HANDLER_NAME_SZ,
9100 LPFC_DRIVER_HANDLER_NAME"%d", index);
9102 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9103 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9104 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
9105 if (phba->cfg_fof && (index == (vectors - 1)))
9106 rc = request_irq(
9107 phba->sli4_hba.msix_entries[index].vector,
9108 &lpfc_sli4_fof_intr_handler, 0,
9109 (char *)&phba->sli4_hba.handler_name[index],
9110 &phba->sli4_hba.fcp_eq_hdl[index]);
9111 else
9112 rc = request_irq(
9113 phba->sli4_hba.msix_entries[index].vector,
9114 &lpfc_sli4_hba_intr_handler, 0,
9115 (char *)&phba->sli4_hba.handler_name[index],
9116 &phba->sli4_hba.fcp_eq_hdl[index]);
9118 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9119 "0486 MSI-X fast-path (%d) "
9120 "request_irq failed (%d)\n", index, rc);
9128 if (vectors != phba->cfg_fcp_io_channel) {
9129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9130 "3238 Reducing IO channels to match number of "
9131 "MSI-X vectors, requested %d got %d\n",
9132 phba->cfg_fcp_io_channel, vectors);
9133 phba->cfg_fcp_io_channel = vectors;
9136 if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
9137 lpfc_sli4_set_affinity(phba, vectors);
9140 cfg_fail_out:
9141 /* free the irq already requested */
9142 for (--index; index >= 0; index--) {
9143 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
9145 free_irq(phba->sli4_hba.msix_entries[index].vector,
9146 &phba->sli4_hba.fcp_eq_hdl[index]);
9149 /* Unconfigure MSI-X capability structure */
9150 pci_disable_msix(phba->pcidev);
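/*
 * Editorial note: unlike the SLI-3 path's pci_enable_msix_exact(),
 * pci_enable_msix_range() may grant fewer vectors than requested, which
 * is why the code above shrinks cfg_fcp_io_channel to match:
 *
 *	vectors = pci_enable_msix_range(pdev, entries, minvec, maxvec);
 *	if (vectors < 0)
 *		return vectors;		(no vectors enabled)
 *	(vectors now lies somewhere in [minvec, maxvec])
 */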
9157 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
9158 * @phba: pointer to lpfc hba data structure.
9160 * This routine is invoked to release the MSI-X vectors and then disable the
9161 * MSI-X interrupt mode to device with SLI-4 interface spec.
9164 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
9168 /* Free up MSI-X multi-message vectors */
9169 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
9170 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
9172 free_irq(phba->sli4_hba.msix_entries[index].vector,
9173 &phba->sli4_hba.fcp_eq_hdl[index]);
9175 if (phba->cfg_fof) {
9176 free_irq(phba->sli4_hba.msix_entries[index].vector,
9177 &phba->sli4_hba.fcp_eq_hdl[index]);
9180 pci_disable_msix(phba->pcidev);
9186 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
9187 * @phba: pointer to lpfc hba data structure.
9189 * This routine is invoked to enable the MSI interrupt mode to device with
9190 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
9191 * to enable the MSI vector. The device driver is responsible for calling
9192 * the request_irq() to register the MSI vector with an interrupt handler,
9193 * which is done in this function.
9197 * other values - error
9200 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
9204 rc = pci_enable_msi(phba->pcidev);
9206 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9207 "0487 PCI enable MSI mode success.\n");
9209 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9210 "0488 PCI enable MSI mode failed (%d)\n", rc);
9214 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9215 0, LPFC_DRIVER_NAME, phba);
9217 pci_disable_msi(phba->pcidev);
9218 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
9219 "0490 MSI request_irq failed (%d)\n", rc);
9223 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
9224 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9225 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9228 if (phba->cfg_fof) {
9229 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9230 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9236 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
9237 * @phba: pointer to lpfc hba data structure.
9239 * This routine is invoked to disable the MSI interrupt mode to device with
9240 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
9241 * done request_irq() on before calling pci_disable_msi(). Failure to do so
9242 * results in a BUG_ON() and the device will be left with MSI enabled,
9243 * leaking its vector.
9246 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
9248 free_irq(phba->pcidev->irq, phba);
9249 pci_disable_msi(phba->pcidev);
9254 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
9255 * @phba: pointer to lpfc hba data structure.
9257 * This routine is invoked to enable device interrupt and associate driver's
9258 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
9259 * interface spec. Depending on the interrupt mode configured for the driver,
9260 * the driver will try to fall back from the configured interrupt mode to an
9261 * interrupt mode which is supported by the platform, kernel, and device in
9263 * MSI-X -> MSI -> IRQ.
9267 * other values - error
9270 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
9272 uint32_t intr_mode = LPFC_INTR_ERROR;
9275 if (cfg_mode == 2) {
9276 /* Preparation before conf_msi mbox cmd */
9279 /* Now, try to enable MSI-X interrupt mode */
9280 retval = lpfc_sli4_enable_msix(phba);
9282 /* Indicate initialization to MSI-X mode */
9283 phba->intr_type = MSIX;
9289 /* Fallback to MSI if MSI-X initialization failed */
9290 if (cfg_mode >= 1 && phba->intr_type == NONE) {
9291 retval = lpfc_sli4_enable_msi(phba);
9293 /* Indicate initialization to MSI mode */
9294 phba->intr_type = MSI;
9299 /* Fallback to INTx if both MSI-X/MSI initialization failed */
9300 if (phba->intr_type == NONE) {
9301 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
9302 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
9304 /* Indicate initialization to INTx mode */
9305 phba->intr_type = INTx;
9307 for (index = 0; index < phba->cfg_fcp_io_channel;
9309 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9310 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9311 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
9314 if (phba->cfg_fof) {
9315 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
9316 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
9317 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
9326 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
9327 * @phba: pointer to lpfc hba data structure.
9329 * This routine is invoked to disable device interrupt and disassociate
9330 * the driver's interrupt handler(s) from interrupt vector(s) to device
9331 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
9332 * will release the interrupt vector(s) for the message signaled interrupt.
9335 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
9337 /* Disable the currently initialized interrupt mode */
9338 if (phba->intr_type == MSIX)
9339 lpfc_sli4_disable_msix(phba);
9340 else if (phba->intr_type == MSI)
9341 lpfc_sli4_disable_msi(phba);
9342 else if (phba->intr_type == INTx)
9343 free_irq(phba->pcidev->irq, phba);
9345 /* Reset interrupt management states */
9346 phba->intr_type = NONE;
9347 phba->sli.slistat.sli_intr = 0;
9353 * lpfc_unset_hba - Unset SLI3 hba device initialization
9354 * @phba: pointer to lpfc hba data structure.
9356 * This routine is invoked to undo the HBA device initialization steps on
9357 * a device with SLI-3 interface spec.
9360 lpfc_unset_hba(struct lpfc_hba *phba)
9362 struct lpfc_vport *vport = phba->pport;
9363 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9365 spin_lock_irq(shost->host_lock);
9366 vport->load_flag |= FC_UNLOADING;
9367 spin_unlock_irq(shost->host_lock);
9369 kfree(phba->vpi_bmask);
9370 kfree(phba->vpi_ids);
9372 lpfc_stop_hba_timers(phba);
9374 phba->pport->work_port_events = 0;
9376 lpfc_sli_hba_down(phba);
9378 lpfc_sli_brdrestart(phba);
9380 lpfc_sli_disable_intr(phba);
9386 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
9387 * @phba: Pointer to HBA context object.
9389 * This function is called in the SLI4 code path to wait for completion
9390 * of device's XRIs exchange busy. It will check the XRI exchange busy
9391 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
9392 * that, it will check the XRI exchange busy on outstanding FCP and ELS
9393 * I/Os every 30 seconds, log an error message, and wait forever. Only
9394 * when all XRI exchange busy events complete will the driver unload
9395 * proceed with invoking the function reset ioctl mailbox command to the
9396 * CNA and the rest of the driver unload resource release.
9399 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
9402 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9403 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
9405 while (!fcp_xri_cmpl || !els_xri_cmpl) {
9406 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
9408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9409 "2877 FCP XRI exchange busy "
9410 "wait time: %d seconds.\n",
9413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9414 "2878 ELS XRI exchange busy "
9415 "wait time: %d seconds.\n",
9417 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
9418 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
9420 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
9421 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
9424 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
9426 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
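/*
 * Editorial sketch of the two-phase wait above (T1, T2 and TMO stand for
 * the driver's LPFC_XRI_EXCH_BUSY_WAIT_* constants):
 *
 *	while (!fcp_xri_cmpl || !els_xri_cmpl) {
 *		if (wait_time > TMO) {
 *			(log which list is still busy)
 *			msleep(T2);
 *			wait_time += T2;
 *		} else {
 *			msleep(T1);
 *			wait_time += T1;
 *		}
 *		(re-evaluate both list_empty() checks)
 *	}
 */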
9431 * lpfc_sli4_hba_unset - Unset the fcoe hba
9432 * @phba: Pointer to HBA context object.
9434 * This function is called in the SLI4 code path to reset the HBA's FCoE
9435 * function. The caller is not required to hold any lock. This routine
9436 * issues PCI function reset mailbox command to reset the FCoE function.
9437 * At the end of the function, it calls lpfc_hba_down_post function to
9438 * free any pending commands.
9441 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
9444 LPFC_MBOXQ_t *mboxq;
9445 struct pci_dev *pdev = phba->pcidev;
9447 lpfc_stop_hba_timers(phba);
9448 phba->sli4_hba.intr_enable = 0;
9451 * Gracefully wait out the potential current outstanding asynchronous
9452 * mailbox command.
9455 /* First, block any pending async mailbox command from being posted */
9456 spin_lock_irq(&phba->hbalock);
9457 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9458 spin_unlock_irq(&phba->hbalock);
9459 /* Now, try to wait it out if we can */
9460 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9462 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
9465 /* Forcefully release the outstanding mailbox command if timed out */
9466 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9467 spin_lock_irq(&phba->hbalock);
9468 mboxq = phba->sli.mbox_active;
9469 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9470 __lpfc_mbox_cmpl_put(phba, mboxq);
9471 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9472 phba->sli.mbox_active = NULL;
9473 spin_unlock_irq(&phba->hbalock);
9476 /* Abort all iocbs associated with the hba */
9477 lpfc_sli_hba_iocb_abort(phba);
9479 /* Wait for completion of device XRI exchange busy */
9480 lpfc_sli4_xri_exchange_busy_wait(phba);
9482 /* Disable PCI subsystem interrupt */
9483 lpfc_sli4_disable_intr(phba);
9485 /* Disable SR-IOV if enabled */
9486 if (phba->cfg_sriov_nr_virtfn)
9487 pci_disable_sriov(pdev);
9489 /* Stop kthread signal shall trigger work_done one more time */
9490 kthread_stop(phba->worker_thread);
9492 /* Reset SLI4 HBA FCoE function */
9493 lpfc_pci_function_reset(phba);
9494 lpfc_sli4_queue_destroy(phba);
9496 /* Stop the SLI4 device port */
9497 phba->pport->work_port_events = 0;
9501 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
9502 * @phba: Pointer to HBA context object.
9503 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9505 * This function is called in the SLI4 code path to read the port's
9506 * sli4 capabilities.
9508 * This function may be called from any context that can block-wait
9509 * for the completion. The expectation is that this routine is called
9510 * typically from probe_one or from the online routine.
9513 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9516 struct lpfc_mqe *mqe;
9517 struct lpfc_pc_sli4_params *sli4_params;
9521 mqe = &mboxq->u.mqe;
9523 /* Read the port's SLI4 Parameters port capabilities */
9524 lpfc_pc_sli4_params(mboxq);
9525 if (!phba->sli4_hba.intr_enable)
9526 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9527 else {
9528 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9529 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9530 }
9535 sli4_params = &phba->sli4_hba.pc_sli4_params;
9536 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
9537 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
9538 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
9539 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
9540 &mqe->un.sli4_params);
9541 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
9542 &mqe->un.sli4_params);
9543 sli4_params->proto_types = mqe->un.sli4_params.word3;
9544 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
9545 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
9546 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
9547 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
9548 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
9549 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
9550 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
9551 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
9552 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
9553 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
9554 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
9555 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
9556 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
9557 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
9558 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
9559 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
9560 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
9561 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
9562 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
9563 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
9565 /* Make sure that sge_supp_len can be handled by the driver */
9566 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9567 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9573 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
9574 * @phba: Pointer to HBA context object.
9575 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
9577 * This function is called in the SLI4 code path to read the port's
9578 * sli4 capabilities.
9580 * This function may be called from any context that can block-wait
9581 * for the completion. The expectation is that this routine is called
9582 * typically from probe_one or from the online routine.
9585 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9588 struct lpfc_mqe *mqe = &mboxq->u.mqe;
9589 struct lpfc_pc_sli4_params *sli4_params;
9592 struct lpfc_sli4_parameters *mbx_sli4_parameters;
9595 * By default, the driver assumes the SLI4 port requires RPI
9596 * header postings. The SLI4_PARAM response will correct this
9599 phba->sli4_hba.rpi_hdrs_in_use = 1;
9601 /* Read the port's SLI4 Config Parameters */
9602 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
9603 sizeof(struct lpfc_sli4_cfg_mhdr));
9604 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9605 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
9606 length, LPFC_SLI4_MBX_EMBED);
9607 if (!phba->sli4_hba.intr_enable)
9608 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9609 else {
9610 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9611 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9612 }
9615 sli4_params = &phba->sli4_hba.pc_sli4_params;
9616 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
9617 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
9618 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
9619 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
9620 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
9621 mbx_sli4_parameters);
9622 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
9623 mbx_sli4_parameters);
9624 if (bf_get(cfg_phwq, mbx_sli4_parameters))
9625 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
9626 else
9627 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9628 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9629 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
9630 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
9631 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9632 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9633 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
9634 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
9635 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
9636 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
9637 mbx_sli4_parameters);
9638 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
9639 mbx_sli4_parameters);
9640 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
9641 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
9643 /* Make sure that sge_supp_len can be handled by the driver */
9644 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9645 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9648 * Issue IOs with the CDB embedded in the WQE to minimize the number
9649 * of DMAs the firmware has to do. Setting this to 1 also forces
9650 * the driver to use 128-byte WQEs for FCP IOs.
9652 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
9653 phba->fcp_embed_io = 1;
9654 else
9655 phba->fcp_embed_io = 0;
9658 * Check if the SLI port supports MDS Diagnostics
9660 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
9661 phba->mds_diags_support = 1;
9662 else
9663 phba->mds_diags_support = 0;
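/*
 * Editorial sketch of the mailbox submission pattern shared by both SLI4
 * parameter readers above: poll while interrupts are disabled, otherwise
 * block-wait with the command-specific timeout:
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	else
 *		rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
 *				lpfc_mbox_tmo_val(phba, mboxq));
 */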
9668 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
9669 * @pdev: pointer to PCI device
9670 * @pid: pointer to PCI device identifier
9672 * This routine is to be called to attach a device with SLI-3 interface spec
9673 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
9674 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
9675 * information of the device and driver to see if the driver states that it can
9676 * support this kind of device. If the match is successful, the driver core
9677 * invokes this routine. If this routine determines it can claim the HBA, it
9678 * does all the initialization that it needs to do to handle the HBA properly.
9681 * 0 - driver can claim the device
9682 * negative value - driver can not claim the device
9685 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
9687 struct lpfc_hba *phba;
9688 struct lpfc_vport *vport = NULL;
9689 struct Scsi_Host *shost = NULL;
9691 uint32_t cfg_mode, intr_mode;
9693 /* Allocate memory for HBA structure */
9694 phba = lpfc_hba_alloc(pdev);
9698 /* Perform generic PCI device enabling operation */
9699 error = lpfc_enable_pci_dev(phba);
9703 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
9704 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
9706 goto out_disable_pci_dev;
9708 /* Set up SLI-3 specific device PCI memory space */
9709 error = lpfc_sli_pci_mem_setup(phba);
9711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9712 "1402 Failed to set up pci memory space.\n");
9713 goto out_disable_pci_dev;
9716 /* Set up phase-1 common device driver resources */
9717 error = lpfc_setup_driver_resource_phase1(phba);
9719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9720 "1403 Failed to set up driver resource.\n");
9721 goto out_unset_pci_mem_s3;
9724 /* Set up SLI-3 specific device driver resources */
9725 error = lpfc_sli_driver_resource_setup(phba);
9727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9728 "1404 Failed to set up driver resource.\n");
9729 goto out_unset_pci_mem_s3;
9732 /* Initialize and populate the iocb list per host */
9733 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
9735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9736 "1405 Failed to initialize iocb list.\n");
9737 goto out_unset_driver_resource_s3;
9740 /* Set up common device driver resources */
9741 error = lpfc_setup_driver_resource_phase2(phba);
9743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9744 "1406 Failed to set up driver resource.\n");
9745 goto out_free_iocb_list;
9748 /* Get the default values for Model Name and Description */
9749 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9751 /* Create SCSI host to the physical port */
9752 error = lpfc_create_shost(phba);
9754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9755 "1407 Failed to create scsi host.\n");
9756 goto out_unset_driver_resource;
9759 /* Configure sysfs attributes */
9760 vport = phba->pport;
9761 error = lpfc_alloc_sysfs_attr(vport);
9763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9764 "1476 Failed to allocate sysfs attr\n");
9765 goto out_destroy_shost;
9768 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
9769 /* Now, trying to enable interrupt and bring up the device */
9770 cfg_mode = phba->cfg_use_msi;
9772 /* Put device to a known state before enabling interrupt */
9773 lpfc_stop_port(phba);
9774 /* Configure and enable interrupt */
9775 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
9776 if (intr_mode == LPFC_INTR_ERROR) {
9777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9778 "0431 Failed to enable interrupt.\n");
9780 goto out_free_sysfs_attr;
9782 /* SLI-3 HBA setup */
9783 if (lpfc_sli_hba_setup(phba)) {
9784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9785 "1477 Failed to set up hba\n");
9787 goto out_remove_device;
9790 /* Wait 50ms for the interrupts of previous mailbox commands */
9792 /* Check active interrupts on message signaled interrupts */
9793 if (intr_mode == 0 ||
9794 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
9795 /* Log the current active interrupt mode */
9796 phba->intr_mode = intr_mode;
9797 lpfc_log_intr_mode(phba, intr_mode);
9800 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9801 "0447 Configure interrupt mode (%d) "
9802 "failed active interrupt test.\n",
9804 /* Disable the current interrupt mode */
9805 lpfc_sli_disable_intr(phba);
9806 /* Try next level of interrupt mode */
9807 cfg_mode = --intr_mode;
9811 /* Perform post initialization setup */
9812 lpfc_post_init_setup(phba);
9814 /* Check if there are static vports to be created. */
9815 lpfc_create_static_vport(phba);
9820 lpfc_unset_hba(phba);
9821 out_free_sysfs_attr:
9822 lpfc_free_sysfs_attr(vport);
9824 lpfc_destroy_shost(phba);
9825 out_unset_driver_resource:
9826 lpfc_unset_driver_resource_phase2(phba);
9828 lpfc_free_iocb_list(phba);
9829 out_unset_driver_resource_s3:
9830 lpfc_sli_driver_resource_unset(phba);
9831 out_unset_pci_mem_s3:
9832 lpfc_sli_pci_mem_unset(phba);
9833 out_disable_pci_dev:
9834 lpfc_disable_pci_dev(phba);
9836 scsi_host_put(shost);
9838 lpfc_hba_free(phba);
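/*
 * Editorial note on the error handling above: each setup step that can
 * fail jumps to the label that undoes everything done so far, in reverse
 * order of initialization (the standard kernel unwind idiom; the step
 * name below is hypothetical):
 *
 *	error = lpfc_setup_step(phba);
 *	if (error)
 *		goto out_undo_previous_step;
 */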
9843 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
9844 * @pdev: pointer to PCI device
9846 * This routine is to be called to detach a device with SLI-3 interface
9847 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
9848 * removed from PCI bus, it performs all the necessary cleanup for the HBA
9849 * device to be removed from the PCI subsystem properly.
9852 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
9854 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9855 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9856 struct lpfc_vport **vports;
9857 struct lpfc_hba *phba = vport->phba;
9860 spin_lock_irq(&phba->hbalock);
9861 vport->load_flag |= FC_UNLOADING;
9862 spin_unlock_irq(&phba->hbalock);
9864 lpfc_free_sysfs_attr(vport);
9866 /* Release all the vports against this physical port */
9867 vports = lpfc_create_vport_work_array(phba);
9869 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9870 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9872 fc_vport_terminate(vports[i]->fc_vport);
9874 lpfc_destroy_vport_work_array(phba, vports);
9876 /* Remove FC host and then SCSI host with the physical port */
9877 fc_remove_host(shost);
9878 scsi_remove_host(shost);
9879 lpfc_cleanup(vport);
9882 * Bring down the SLI Layer. This step disable all interrupts,
9883 * clears the rings, discards all mailbox commands, and resets
9887 /* HBA interrupt will be disabled after this call */
9888 lpfc_sli_hba_down(phba);
9889 /* Stop kthread signal shall trigger work_done one more time */
9890 kthread_stop(phba->worker_thread);
9891 /* Final cleanup of txcmplq and reset the HBA */
9892 lpfc_sli_brdrestart(phba);
9894 kfree(phba->vpi_bmask);
9895 kfree(phba->vpi_ids);
9897 lpfc_stop_hba_timers(phba);
9898 spin_lock_irq(&phba->hbalock);
9899 list_del_init(&vport->listentry);
9900 spin_unlock_irq(&phba->hbalock);
9902 lpfc_debugfs_terminate(vport);
9904 /* Disable SR-IOV if enabled */
9905 if (phba->cfg_sriov_nr_virtfn)
9906 pci_disable_sriov(pdev);
9908 /* Disable interrupt */
9909 lpfc_sli_disable_intr(phba);
9911 scsi_host_put(shost);
9914 * Call scsi_free before mem_free since scsi bufs are released to their
9915 * corresponding pools here.
9917 lpfc_scsi_free(phba);
9918 lpfc_mem_free_all(phba);
9920 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9921 phba->hbqslimp.virt, phba->hbqslimp.phys);
9923 /* Free resources associated with SLI2 interface */
9924 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9925 phba->slim2p.virt, phba->slim2p.phys);
9927 /* unmap adapter SLIM and Control Registers */
9928 iounmap(phba->ctrl_regs_memmap_p);
9929 iounmap(phba->slim_memmap_p);
9931 lpfc_hba_free(phba);
9933 pci_release_mem_regions(pdev);
9934 pci_disable_device(pdev);
9938 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
9939 * @pdev: pointer to PCI device
9940 * @msg: power management message
9942 * This routine is to be called from the kernel's PCI subsystem to support
9943 * system Power Management (PM) to device with SLI-3 interface spec. When
9944 * PM invokes this method, it quiesces the device by stopping the driver's
9945 * worker thread for the device, turning off the device's interrupt and DMA,
9946 * and bringing the device offline. Note that as the driver implements the
9947 * minimum PM requirements to a power-aware driver's PM support for the
9948 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
9949 * to the suspend() method call will be treated as SUSPEND and the driver will
9950 * fully reinitialize its device during resume() method call, the driver will
9951 * set device to PCI_D3hot state in PCI config space instead of setting it
9952 * according to the @msg provided by the PM.
9955 * 0 - driver suspended the device
9959 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9961 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9962 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9964 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9965 "0473 PCI device Power Management suspend.\n");
9967 /* Bring down the device */
9968 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9970 kthread_stop(phba->worker_thread);
9972 /* Disable interrupt from device */
9973 lpfc_sli_disable_intr(phba);
9975 /* Save device state to PCI config space */
9976 pci_save_state(pdev);
9977 pci_set_power_state(pdev, PCI_D3hot);
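/*
 * Editorial sketch of the suspend/resume symmetry between this routine
 * and lpfc_pci_resume_one_s3() below: suspend saves config space and
 * drops to D3hot; resume raises to D0, restores config space, and saves
 * again because pci_restore_state() clears the saved_state flag:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *	...
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 *	pci_save_state(pdev);
 */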
9983 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
9984 * @pdev: pointer to PCI device
9986 * This routine is to be called from the kernel's PCI subsystem to support
9987 * system Power Management (PM) to device with SLI-3 interface spec. When PM
9988 * invokes this method, it restores the device's PCI config space state and
9989 * fully reinitializes the device and brings it online. Note that as the
9990 * driver implements the minimum PM requirements to a power-aware driver's
9991 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
9992 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
9993 * driver will fully reinitialize its device during resume() method call,
9994 * the device will be set to PCI_D0 directly in PCI config space before
9995 * restoring the state.
9998 * 0 - driver suspended the device
10002 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
10004 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10005 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10006 uint32_t intr_mode;
10009 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10010 "0452 PCI device Power Management resume.\n");
10012 /* Restore device state from PCI config space */
10013 pci_set_power_state(pdev, PCI_D0);
10014 pci_restore_state(pdev);
10017 * As the new kernel behavior of pci_restore_state() API call clears
10018 * device saved_state flag, need to save the restored state again.
10020 pci_save_state(pdev);
10022 if (pdev->is_busmaster)
10023 pci_set_master(pdev);
10025 /* Startup the kernel thread for this host adapter. */
10026 phba->worker_thread = kthread_run(lpfc_do_work, phba,
10027 "lpfc_worker_%d", phba->brd_no);
10028 if (IS_ERR(phba->worker_thread)) {
10029 error = PTR_ERR(phba->worker_thread);
10030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10031 "0434 PM resume failed to start worker "
10032 "thread: error=x%x.\n", error);
10036 /* Configure and enable interrupt */
10037 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
10038 if (intr_mode == LPFC_INTR_ERROR) {
10039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10040 "0430 PM resume Failed to enable interrupt\n");
10043 phba->intr_mode = intr_mode;
10045 /* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
10049 /* Log the current active interrupt mode */
10050 lpfc_log_intr_mode(phba, phba->intr_mode);
10056 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
10057 * @phba: pointer to lpfc hba data structure.
10059 * This routine is called to prepare the SLI3 device for PCI slot recover. It
10060 * aborts all the outstanding SCSI I/Os to the pci device.
10063 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
10065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10066 "2723 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
10072 lpfc_sli_abort_fcp_rings(phba);
10076 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
10077 * @phba: pointer to lpfc hba data structure.
10079 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
10084 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
10086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10087 "2710 PCI channel disable preparing for reset\n");
10089 /* Block any management I/Os to the device */
10090 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
10092 /* Block all SCSI devices' I/Os on the host */
10093 lpfc_scsi_dev_block(phba);
10095 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10096 lpfc_sli_flush_fcp_rings(phba);
10098 /* stop all timers */
10099 lpfc_stop_hba_timers(phba);
10101 /* Disable interrupt and pci device */
10102 lpfc_sli_disable_intr(phba);
10103 pci_disable_device(phba->pcidev);
10107 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
10108 * @phba: pointer to lpfc hba data structure.
10110 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
10115 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
10117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10118 "2711 PCI channel permanent disable for failure\n");
10119 /* Block all SCSI devices' I/Os on the host */
10120 lpfc_scsi_dev_block(phba);
10122 /* stop all timers */
10123 lpfc_stop_hba_timers(phba);
10125 /* Clean up all driver's outstanding SCSI I/Os */
10126 lpfc_sli_flush_fcp_rings(phba);
10130 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
10131 * @pdev: pointer to PCI device.
10132 * @state: the current PCI connection state.
10134 * This routine is called from the PCI subsystem for I/O error handling to
10135 * device with SLI-3 interface spec. This function is called by the PCI
10136 * subsystem after a PCI bus error affecting this device has been detected.
10137 * When this function is invoked, it will need to stop all the I/Os and
10138 * interrupt(s) to the device. Once that is done, it will return
10139 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
10143 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
10144 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10145 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10147 static pci_ers_result_t
10148 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
10150 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10151 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	switch (state) {
	case pci_channel_io_normal:
10155 /* Non-fatal error, prepare for recovery */
10156 lpfc_sli_prep_dev_for_recover(phba);
10157 return PCI_ERS_RESULT_CAN_RECOVER;
10158 case pci_channel_io_frozen:
10159 /* Fatal error, prepare for slot reset */
10160 lpfc_sli_prep_dev_for_reset(phba);
10161 return PCI_ERS_RESULT_NEED_RESET;
10162 case pci_channel_io_perm_failure:
10163 /* Permanent failure, prepare for device down */
10164 lpfc_sli_prep_dev_for_perm_failure(phba);
10165 return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
10168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10169 "0472 Unknown PCI error state: x%x\n", state);
10170 lpfc_sli_prep_dev_for_reset(phba);
10171 return PCI_ERS_RESULT_NEED_RESET;
10176 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
10177 * @pdev: pointer to PCI device.
10179 * This routine is called from the PCI subsystem for error handling to
10180 * device with SLI-3 interface spec. This is called after PCI bus has been
10181 * reset to restart the PCI card from scratch, as if from a cold-boot.
10182 * During the PCI subsystem error recovery, after driver returns
10183 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
10184 * recovery and then call this routine before calling the .resume method
10185 * to recover the device. This function will initialize the HBA device,
10186 * enable the interrupt, but it will just put the HBA to offline state
10187 * without passing any I/O traffic.
10190 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
10191 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10193 static pci_ers_result_t
10194 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
10196 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10197 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10198 struct lpfc_sli *psli = &phba->sli;
10199 uint32_t intr_mode;
10201 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10202 if (pci_enable_device_mem(pdev)) {
10203 printk(KERN_ERR "lpfc: Cannot re-enable "
10204 "PCI device after reset.\n");
10205 return PCI_ERS_RESULT_DISCONNECT;
10208 pci_restore_state(pdev);
	/*
	 * pci_restore_state() clears the device's saved_state flag, so the
	 * restored state must be saved again.
	 */
10214 pci_save_state(pdev);
10216 if (pdev->is_busmaster)
10217 pci_set_master(pdev);
10219 spin_lock_irq(&phba->hbalock);
10220 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10221 spin_unlock_irq(&phba->hbalock);
10223 /* Configure and enable interrupt */
10224 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
10225 if (intr_mode == LPFC_INTR_ERROR) {
10226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10227 "0427 Cannot re-enable interrupt after "
10229 return PCI_ERS_RESULT_DISCONNECT;
10231 phba->intr_mode = intr_mode;
10233 /* Take device offline, it will perform cleanup */
10234 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10235 lpfc_offline(phba);
10236 lpfc_sli_brdrestart(phba);
10238 /* Log the current active interrupt mode */
10239 lpfc_log_intr_mode(phba, phba->intr_mode);
10241 return PCI_ERS_RESULT_RECOVERED;
10245 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
10246 * @pdev: pointer to PCI device
10248 * This routine is called from the PCI subsystem for error handling to device
10249 * with SLI-3 interface spec. It is called when kernel error recovery tells
10250 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
10251 * error recovery. After this call, traffic can start to flow from this device
10255 lpfc_io_resume_s3(struct pci_dev *pdev)
10257 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10258 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	/* Bring device online; it will be a no-op for non-fatal error resume */
	lpfc_online(phba);
10263 /* Clean up Advanced Error Reporting (AER) if needed */
10264 if (phba->hba_flag & HBA_AER_ENABLED)
10265 pci_cleanup_aer_uncorrect_error_status(pdev);
10269 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
10270 * @phba: pointer to lpfc hba data structure.
10272 * returns the number of ELS/CT IOCBs to reserve
10275 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
10277 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
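/*
 * Worked example (illustrative): a port whose SLI4 config reports
 * max_xri = 900 falls into the "<= 1024" tier above, so 100 IOCBs are
 * reserved for ELS/CT traffic; a small port with max_xri = 64 reserves
 * only 10.
 */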
10299 * lpfc_write_firmware - attempt to write a firmware image to the port
10300 * @fw: pointer to firmware image returned from request_firmware.
 * @context: opaque pointer to the lpfc hba data structure (struct lpfc_hba *).
10305 lpfc_write_firmware(const struct firmware *fw, void *context)
10307 struct lpfc_hba *phba = (struct lpfc_hba *)context;
10308 char fwrev[FW_REV_STR_SIZE];
10309 struct lpfc_grp_hdr *image;
10310 struct list_head dma_buffer_list;
10312 struct lpfc_dmabuf *dmabuf, *next;
10313 uint32_t offset = 0, temp_offset = 0;
10314 uint32_t magic_number, ftype, fid, fsize;
10316 /* It can be null in no-wait mode, sanity check */
10321 image = (struct lpfc_grp_hdr *)fw->data;
10323 magic_number = be32_to_cpu(image->magic_number);
10324 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
10326 fsize = be32_to_cpu(image->size);
10328 INIT_LIST_HEAD(&dma_buffer_list);
10329 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
10330 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
10331 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
10332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10333 "3022 Invalid FW image found. "
10334 "Magic:%x Type:%x ID:%x Size %d %zd\n",
10335 magic_number, ftype, fid, fsize, fw->size);
10339 lpfc_decode_firmware_rev(phba, fwrev, 1);
10340 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
10341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10342 "3023 Updating Firmware, Current Version:%s "
10343 "New Version:%s\n",
10344 fwrev, image->revision);
10345 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
10346 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
10352 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
10356 if (!dmabuf->virt) {
10361 list_add_tail(&dmabuf->list, &dma_buffer_list);
10363 while (offset < fw->size) {
10364 temp_offset = offset;
10365 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
10366 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
10367 memcpy(dmabuf->virt,
10368 fw->data + temp_offset,
10369 fw->size - temp_offset);
10370 temp_offset = fw->size;
10373 memcpy(dmabuf->virt, fw->data + temp_offset,
10375 temp_offset += SLI4_PAGE_SIZE;
10377 rc = lpfc_wr_object(phba, &dma_buffer_list,
10378 (fw->size - offset), &offset);
10386 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
10387 list_del(&dmabuf->list);
10388 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
10389 dmabuf->virt, dmabuf->phys);
10392 release_firmware(fw);
10394 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10395 "3024 Firmware update done: %d.\n", rc);
10400 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
10401 * @phba: pointer to lpfc hba data structure.
 * This routine is called to perform a Linux generic firmware upgrade on a
 * device that supports this feature.
10407 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
10409 uint8_t file_name[ELX_MODEL_NAME_SIZE];
10411 const struct firmware *fw;
10413 /* Only supported on SLI4 interface type 2 for now */
10414 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;
10418 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
10420 if (fw_upgrade == INT_FW_UPGRADE) {
10421 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
10422 file_name, &phba->pcidev->dev,
10423 GFP_KERNEL, (void *)phba,
10424 lpfc_write_firmware);
10425 } else if (fw_upgrade == RUN_FW_UPGRADE) {
10426 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	}
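/*
 * Editor's note (illustrative): two invocation modes are supported.
 * INT_FW_UPGRADE (used from the probe path) requests the image
 * asynchronously via request_firmware_nowait() so probe is not blocked;
 * RUN_FW_UPGRADE (run-time initiated) blocks in request_firmware() and
 * writes the image immediately. In both cases the file name is derived
 * from the model; e.g. a hypothetical ModelName of "LPe16000" makes the
 * kernel firmware loader look for "LPe16000.grp".
 */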
10437 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
10438 * @pdev: pointer to PCI device
10439 * @pid: pointer to PCI device identifier
 * This routine is called from the kernel's PCI subsystem for devices with
 * the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
 * device-specific information of the device and driver to see if the driver
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
10451 * 0 - driver can claim the device
10452 * negative value - driver can not claim the device
10455 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
10457 struct lpfc_hba *phba;
10458 struct lpfc_vport *vport = NULL;
10459 struct Scsi_Host *shost = NULL;
10461 uint32_t cfg_mode, intr_mode;
10462 int adjusted_fcp_io_channel;
10464 /* Allocate memory for HBA structure */
10465 phba = lpfc_hba_alloc(pdev);
10469 /* Perform generic PCI device enabling operation */
10470 error = lpfc_enable_pci_dev(phba);
10472 goto out_free_phba;
10474 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
10475 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
10477 goto out_disable_pci_dev;
10479 /* Set up SLI-4 specific device PCI memory space */
10480 error = lpfc_sli4_pci_mem_setup(phba);
10482 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10483 "1410 Failed to set up pci memory space.\n");
10484 goto out_disable_pci_dev;
10487 /* Set up phase-1 common device driver resources */
10488 error = lpfc_setup_driver_resource_phase1(phba);
10490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10491 "1411 Failed to set up driver resource.\n");
10492 goto out_unset_pci_mem_s4;
10495 /* Set up SLI-4 Specific device driver resources */
10496 error = lpfc_sli4_driver_resource_setup(phba);
10498 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10499 "1412 Failed to set up driver resource.\n");
10500 goto out_unset_pci_mem_s4;
10503 /* Initialize and populate the iocb list per host */
10505 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10506 "2821 initialize iocb list %d.\n",
10507 phba->cfg_iocb_cnt*1024);
10508 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
10511 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10512 "1413 Failed to initialize iocb list.\n");
10513 goto out_unset_driver_resource_s4;
10516 INIT_LIST_HEAD(&phba->active_rrq_list);
10517 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
10519 /* Set up common device driver resources */
10520 error = lpfc_setup_driver_resource_phase2(phba);
10522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10523 "1414 Failed to set up driver resource.\n");
10524 goto out_free_iocb_list;
10527 /* Get the default values for Model Name and Description */
10528 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
10530 /* Create SCSI host to the physical port */
10531 error = lpfc_create_shost(phba);
10533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10534 "1415 Failed to create scsi host.\n");
10535 goto out_unset_driver_resource;
10538 /* Configure sysfs attributes */
10539 vport = phba->pport;
10540 error = lpfc_alloc_sysfs_attr(vport);
10542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10543 "1416 Failed to allocate sysfs attr\n");
10544 goto out_destroy_shost;
10547 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
10548 /* Now, trying to enable interrupt and bring up the device */
10549 cfg_mode = phba->cfg_use_msi;
10551 /* Put device to a known state before enabling interrupt */
10552 lpfc_stop_port(phba);
10553 /* Configure and enable interrupt */
10554 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
10555 if (intr_mode == LPFC_INTR_ERROR) {
10556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10557 "0426 Failed to enable interrupt.\n");
10559 goto out_free_sysfs_attr;
10561 /* Default to single EQ for non-MSI-X */
10562 if (phba->intr_type != MSIX)
10563 adjusted_fcp_io_channel = 1;
	else
		adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
10566 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
10567 /* Set up SLI-4 HBA */
10568 if (lpfc_sli4_hba_setup(phba)) {
10569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10570 "1421 Failed to set up hba\n");
10572 goto out_disable_intr;
10575 /* Log the current active interrupt mode */
10576 phba->intr_mode = intr_mode;
10577 lpfc_log_intr_mode(phba, intr_mode);
10579 /* Perform post initialization setup */
10580 lpfc_post_init_setup(phba);
10582 /* check for firmware upgrade or downgrade */
10583 if (phba->cfg_request_firmware_upgrade)
10584 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
10586 /* Check if there are static vports to be created. */
10587 lpfc_create_static_vport(phba);
10591 lpfc_sli4_disable_intr(phba);
10592 out_free_sysfs_attr:
10593 lpfc_free_sysfs_attr(vport);
10595 lpfc_destroy_shost(phba);
10596 out_unset_driver_resource:
10597 lpfc_unset_driver_resource_phase2(phba);
10598 out_free_iocb_list:
10599 lpfc_free_iocb_list(phba);
10600 out_unset_driver_resource_s4:
10601 lpfc_sli4_driver_resource_unset(phba);
10602 out_unset_pci_mem_s4:
10603 lpfc_sli4_pci_mem_unset(phba);
10604 out_disable_pci_dev:
10605 lpfc_disable_pci_dev(phba);
10607 scsi_host_put(shost);
10609 lpfc_hba_free(phba);
10614 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
10615 * @pdev: pointer to PCI device
 * This routine is called from the kernel's PCI subsystem for devices with
10618 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
10619 * removed from PCI bus, it performs all the necessary cleanup for the HBA
10620 * device to be removed from the PCI subsystem properly.
10623 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
10625 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10626 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
10627 struct lpfc_vport **vports;
10628 struct lpfc_hba *phba = vport->phba;
10631 /* Mark the device unloading flag */
10632 spin_lock_irq(&phba->hbalock);
10633 vport->load_flag |= FC_UNLOADING;
10634 spin_unlock_irq(&phba->hbalock);
10636 /* Free the HBA sysfs attributes */
10637 lpfc_free_sysfs_attr(vport);
10639 /* Release all the vports against this physical port */
10640 vports = lpfc_create_vport_work_array(phba);
10641 if (vports != NULL)
10642 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
10647 lpfc_destroy_vport_work_array(phba, vports);
10649 /* Remove FC host and then SCSI host with the physical port */
10650 fc_remove_host(shost);
10651 scsi_remove_host(shost);
10653 /* Perform cleanup on the physical port */
10654 lpfc_cleanup(vport);
	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
10661 lpfc_debugfs_terminate(vport);
10662 lpfc_sli4_hba_unset(phba);
10664 spin_lock_irq(&phba->hbalock);
10665 list_del_init(&vport->listentry);
10666 spin_unlock_irq(&phba->hbalock);
	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
10671 lpfc_scsi_free(phba);
10673 lpfc_sli4_driver_resource_unset(phba);
10675 /* Unmap adapter Control and Doorbell registers */
10676 lpfc_sli4_pci_mem_unset(phba);
10678 /* Release PCI resources and disable device's PCI function */
10679 scsi_host_put(shost);
10680 lpfc_disable_pci_dev(phba);
10682 /* Finally, free the driver's device data structure */
10683 lpfc_hba_free(phba);
10689 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
10690 * @pdev: pointer to PCI device
10691 * @msg: power management message
10693 * This routine is called from the kernel's PCI subsystem to support system
10694 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off the device's interrupt and DMA, and
 * bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. Therefore, the driver sets the device to the PCI_D3hot state
 * in PCI config space instead of setting it according to the @msg provided
 * by the PM.
10706 * 0 - driver suspended the device
10710 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
10712 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10713 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10715 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10716 "2843 PCI device Power Management suspend.\n");
10718 /* Bring down the device */
10719 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
10720 lpfc_offline(phba);
10721 kthread_stop(phba->worker_thread);
10723 /* Disable interrupt from device */
10724 lpfc_sli4_disable_intr(phba);
10725 lpfc_sli4_queue_destroy(phba);
10727 /* Save device state to PCI config space */
10728 pci_save_state(pdev);
10729 pci_set_power_state(pdev, PCI_D3hot);
10735 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
10736 * @pdev: pointer to PCI device
10738 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that the driver
 * implements only the minimum PM requirements for a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
 * method are treated as SUSPEND, and the driver fully reinitializes its
 * device during the resume() method call. Therefore, the device is set to
 * PCI_D0 directly in PCI config space before restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
10754 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
10756 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10757 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10758 uint32_t intr_mode;
10761 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10762 "0292 PCI device Power Management resume.\n");
10764 /* Restore device state from PCI config space */
10765 pci_set_power_state(pdev, PCI_D0);
10766 pci_restore_state(pdev);
	/*
	 * pci_restore_state() clears the device's saved_state flag, so the
	 * restored state must be saved again.
	 */
10772 pci_save_state(pdev);
10774 if (pdev->is_busmaster)
10775 pci_set_master(pdev);
10777 /* Startup the kernel thread for this host adapter. */
10778 phba->worker_thread = kthread_run(lpfc_do_work, phba,
10779 "lpfc_worker_%d", phba->brd_no);
10780 if (IS_ERR(phba->worker_thread)) {
10781 error = PTR_ERR(phba->worker_thread);
10782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10783 "0293 PM resume failed to start worker "
10784 "thread: error=x%x.\n", error);
10788 /* Configure and enable interrupt */
10789 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10790 if (intr_mode == LPFC_INTR_ERROR) {
10791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10792 "0294 PM resume Failed to enable interrupt\n");
10795 phba->intr_mode = intr_mode;
10797 /* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
10801 /* Log the current active interrupt mode */
10802 lpfc_log_intr_mode(phba, phba->intr_mode);
10808 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
10809 * @phba: pointer to lpfc hba data structure.
10811 * This routine is called to prepare the SLI4 device for PCI slot recover. It
10812 * aborts all the outstanding SCSI I/Os to the pci device.
10815 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
10817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10818 "2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
10823 lpfc_sli_abort_fcp_rings(phba);
10827 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
10828 * @phba: pointer to lpfc hba data structure.
10830 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
10835 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
10837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10838 "2826 PCI channel disable preparing for reset\n");
10840 /* Block any management I/Os to the device */
10841 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
10843 /* Block all SCSI devices' I/Os on the host */
10844 lpfc_scsi_dev_block(phba);
10846 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
10847 lpfc_sli_flush_fcp_rings(phba);
10849 /* stop all timers */
10850 lpfc_stop_hba_timers(phba);
10852 /* Disable interrupt and pci device */
10853 lpfc_sli4_disable_intr(phba);
10854 lpfc_sli4_queue_destroy(phba);
10855 pci_disable_device(phba->pcidev);
10859 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
10860 * @phba: pointer to lpfc hba data structure.
10862 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
10867 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
10869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10870 "2827 PCI channel permanent disable for failure\n");
10872 /* Block all SCSI devices' I/Os on the host */
10873 lpfc_scsi_dev_block(phba);
10875 /* stop all timers */
10876 lpfc_stop_hba_timers(phba);
10878 /* Clean up all driver's outstanding SCSI I/Os */
10879 lpfc_sli_flush_fcp_rings(phba);
10883 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
10884 * @pdev: pointer to PCI device.
10885 * @state: the current PCI connection state.
10887 * This routine is called from the PCI subsystem for error handling to device
10888 * with SLI-4 interface spec. This function is called by the PCI subsystem
10889 * after a PCI bus error affecting this device has been detected. When this
10890 * function is invoked, it will need to stop all the I/Os and interrupt(s)
10891 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
10892 * for the PCI subsystem to perform proper recovery as desired.
10895 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
10896 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10898 static pci_ers_result_t
10899 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
10901 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10902 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	switch (state) {
	case pci_channel_io_normal:
10906 /* Non-fatal error, prepare for recovery */
10907 lpfc_sli4_prep_dev_for_recover(phba);
10908 return PCI_ERS_RESULT_CAN_RECOVER;
10909 case pci_channel_io_frozen:
10910 /* Fatal error, prepare for slot reset */
10911 lpfc_sli4_prep_dev_for_reset(phba);
10912 return PCI_ERS_RESULT_NEED_RESET;
10913 case pci_channel_io_perm_failure:
10914 /* Permanent failure, prepare for device down */
10915 lpfc_sli4_prep_dev_for_perm_failure(phba);
10916 return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
10919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10920 "2825 Unknown PCI error state: x%x\n", state);
10921 lpfc_sli4_prep_dev_for_reset(phba);
10922 return PCI_ERS_RESULT_NEED_RESET;
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
10928 * @pdev: pointer to PCI device.
10930 * This routine is called from the PCI subsystem for error handling to device
10931 * with SLI-4 interface spec. It is called after PCI bus has been reset to
10932 * restart the PCI card from scratch, as if from a cold-boot. During the
10933 * PCI subsystem error recovery, after the driver returns
10934 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
10935 * recovery and then call this routine before calling the .resume method to
10936 * recover the device. This function will initialize the HBA device, enable
10937 * the interrupt, but it will just put the HBA to offline state without
10938 * passing any I/O traffic.
10941 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
10942 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
10944 static pci_ers_result_t
10945 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
10947 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10948 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
10949 struct lpfc_sli *psli = &phba->sli;
10950 uint32_t intr_mode;
10952 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
10953 if (pci_enable_device_mem(pdev)) {
10954 printk(KERN_ERR "lpfc: Cannot re-enable "
10955 "PCI device after reset.\n");
10956 return PCI_ERS_RESULT_DISCONNECT;
10959 pci_restore_state(pdev);
	/*
	 * pci_restore_state() clears the device's saved_state flag, so the
	 * restored state must be saved again.
	 */
10965 pci_save_state(pdev);
10967 if (pdev->is_busmaster)
10968 pci_set_master(pdev);
10970 spin_lock_irq(&phba->hbalock);
10971 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
10972 spin_unlock_irq(&phba->hbalock);
10974 /* Configure and enable interrupt */
10975 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
10976 if (intr_mode == LPFC_INTR_ERROR) {
10977 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10978 "2824 Cannot re-enable interrupt after "
10980 return PCI_ERS_RESULT_DISCONNECT;
10982 phba->intr_mode = intr_mode;
10984 /* Log the current active interrupt mode */
10985 lpfc_log_intr_mode(phba, phba->intr_mode);
10987 return PCI_ERS_RESULT_RECOVERED;
10991 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
10992 * @pdev: pointer to PCI device
10994 * This routine is called from the PCI subsystem for error handling to device
10995 * with SLI-4 interface spec. It is called when kernel error recovery tells
10996 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
10997 * error recovery. After this call, traffic can start to flow from this device
11001 lpfc_io_resume_s4(struct pci_dev *pdev)
11003 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11004 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	/*
	 * In case of slot reset, as function reset is performed through a
	 * mailbox command which needs DMA to be enabled, this operation has
	 * to be moved to the io resume phase. Taking the device offline will
	 * perform the necessary cleanup.
	 */
11012 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
11013 /* Perform device reset */
11014 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
11015 lpfc_offline(phba);
11016 lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}
11021 /* Clean up Advanced Error Reporting (AER) if needed */
11022 if (phba->hba_flag & HBA_AER_ENABLED)
11023 pci_cleanup_aer_uncorrect_error_status(pdev);
11027 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
11028 * @pdev: pointer to PCI device
11029 * @pid: pointer to PCI device identifier
11031 * This routine is to be registered to the kernel's PCI subsystem. When an
11032 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at the PCI device-specific information of the device and driver to see if
 * the driver can support this kind of device. If the match is
11035 * successful, the driver core invokes this routine. This routine dispatches
11036 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
11037 * do all the initialization that it needs to do to handle the HBA device
11041 * 0 - driver can claim the device
11042 * negative value - driver can not claim the device
11045 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
11048 struct lpfc_sli_intf intf;
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;
11053 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
11054 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
11055 rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);
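/*
 * Editor's note (illustrative): the dispatch above keys off the
 * LPFC_SLI_INTF register in PCI config space. Only when the intf word
 * reads back as valid and reports SLI rev 4 is the SLI-4 (s4) probe
 * taken; any other combination, including adapters that predate the
 * register, falls back to the SLI-3 (s3) probe:
 *
 *	valid	slirev	probe path
 *	-----	------	----------------------
 *	yes	4	lpfc_pci_probe_one_s4()
 *	yes	!4	lpfc_pci_probe_one_s3()
 *	no	any	lpfc_pci_probe_one_s3()
 */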
11063 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
11064 * @pdev: pointer to PCI device
11066 * This routine is to be registered to the kernel's PCI subsystem. When an
11067 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
11068 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
11069 * remove routine, which will perform all the necessary cleanup for the
11070 * device to be removed from the PCI subsystem properly.
11073 lpfc_pci_remove_one(struct pci_dev *pdev)
11075 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11076 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11078 switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11087 "1424 Invalid PCI device group: 0x%x\n",
11088 phba->pci_dev_grp);
11095 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
11096 * @pdev: pointer to PCI device
11097 * @msg: power management message
11099 * This routine is to be registered to the kernel's PCI subsystem to support
11100 * system Power Management (PM). When PM invokes this method, it dispatches
11101 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
11102 * suspend the device.
11105 * 0 - driver suspended the device
11109 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
11111 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11112 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11115 switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11124 "1425 Invalid PCI device group: 0x%x\n",
11125 phba->pci_dev_grp);
11132 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
11133 * @pdev: pointer to PCI device
11135 * This routine is to be registered to the kernel's PCI subsystem to support
11136 * system Power Management (PM). When PM invokes this method, it dispatches
11137 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
11138 * resume the device.
 * 0 - driver resumed the device
11145 lpfc_pci_resume_one(struct pci_dev *pdev)
11147 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11148 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11151 switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11160 "1426 Invalid PCI device group: 0x%x\n",
11161 phba->pci_dev_grp);
11168 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
11169 * @pdev: pointer to PCI device.
11170 * @state: the current PCI connection state.
11172 * This routine is registered to the PCI subsystem for error handling. This
11173 * function is called by the PCI subsystem after a PCI bus error affecting
11174 * this device has been detected. When this routine is invoked, it dispatches
11175 * the action to the proper SLI-3 or SLI-4 device error detected handling
11176 * routine, which will perform the proper error detected operation.
11179 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
11180 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11182 static pci_ers_result_t
11183 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
11185 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11186 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11187 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11189 switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11198 "1427 Invalid PCI device group: 0x%x\n",
11199 phba->pci_dev_grp);
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
11207 * @pdev: pointer to PCI device.
11209 * This routine is registered to the PCI subsystem for error handling. This
11210 * function is called after PCI bus has been reset to restart the PCI card
11211 * from scratch, as if from a cold-boot. When this routine is invoked, it
11212 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
11213 * routine, which will perform the proper device reset.
11216 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
11217 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
11219 static pci_ers_result_t
11220 lpfc_io_slot_reset(struct pci_dev *pdev)
11222 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11223 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11224 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
11226 switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11235 "1428 Invalid PCI device group: 0x%x\n",
11236 phba->pci_dev_grp);
11243 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
11244 * @pdev: pointer to PCI device
11246 * This routine is registered to the PCI subsystem for error handling. It
11247 * is called when kernel error recovery tells the lpfc driver that it is
11248 * OK to resume normal PCI operation after PCI bus error recovery. When
11249 * this routine is invoked, it dispatches the action to the proper SLI-3
11250 * or SLI-4 device io_resume routine, which will resume the device operation.
11253 lpfc_io_resume(struct pci_dev *pdev)
11255 struct Scsi_Host *shost = pci_get_drvdata(pdev);
11256 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
11258 switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11267 "1429 Invalid PCI device group: 0x%x\n",
11268 phba->pci_dev_grp);
11275 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
11276 * @phba: pointer to lpfc hba data structure.
11278 * This routine checks to see if OAS is supported for this adapter. If
11279 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
11285 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}
11304 * lpfc_fof_queue_setup - Set up all the fof queues
11305 * @phba: pointer to lpfc hba data structure.
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 * 	0 - successful
11312 * -ENOMEM - No available memory
11315 lpfc_fof_queue_setup(struct lpfc_hba *phba)
11317 struct lpfc_sli *psli = &phba->sli;
11320 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
11324 if (phba->cfg_fof) {
11326 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
11327 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
11331 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
11332 phba->sli4_hba.oas_cq, LPFC_FCP);
11336 phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
11337 phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
11343 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
11345 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
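/*
 * Editor's note (illustrative): the FOF/OAS queues form a small
 * parent-first hierarchy, mirroring the generic SLI4 queue model:
 *
 *	FOF EQ  (event queue, interrupt source)
 *	  `-- OAS CQ  (completion queue, bound to the EQ)
 *	        `-- OAS WQ  (work queue, bound to the CQ)
 *
 * Setup creates parents before children (EQ, then CQ, then WQ), and the
 * error path above tears them down child-first, so a partially built
 * hierarchy never leaves a child pointing at a destroyed parent.
 */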
11351 * lpfc_fof_queue_create - Create all the fof queues
11352 * @phba: pointer to lpfc hba data structure.
11354 * This routine is invoked to allocate all the fof queues for the FC HBA
11355 * operation. For each SLI4 queue type, the parameters such as queue entry
11356 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use a constant number as a placeholder.
 * -ENOMEM - No available memory
11362 * -EIO - The mailbox failed to complete successfully.
11365 lpfc_fof_queue_create(struct lpfc_hba *phba)
11367 struct lpfc_queue *qdesc;
11369 /* Create FOF EQ */
11370 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
11371 phba->sli4_hba.eq_ecount);
11375 phba->sli4_hba.fof_eq = qdesc;
11377 if (phba->cfg_fof) {
11379 /* Create OAS CQ */
11380 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
11381 phba->sli4_hba.cq_ecount);
11385 phba->sli4_hba.oas_cq = qdesc;
11387 /* Create OAS WQ */
11388 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
11389 phba->sli4_hba.wq_ecount);
11393 phba->sli4_hba.oas_wq = qdesc;
11399 lpfc_fof_queue_destroy(phba);
11404 * lpfc_fof_queue_destroy - Destroy all the fof queues
11405 * @phba: pointer to lpfc hba data structure.
11407 * This routine is invoked to release all the SLI4 queues with the FC HBA
11414 lpfc_fof_queue_destroy(struct lpfc_hba *phba)
11416 /* Release FOF Event queue */
11417 if (phba->sli4_hba.fof_eq != NULL) {
11418 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
11419 phba->sli4_hba.fof_eq = NULL;
11422 /* Release OAS Completion queue */
11423 if (phba->sli4_hba.oas_cq != NULL) {
11424 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
11425 phba->sli4_hba.oas_cq = NULL;
11428 /* Release OAS Work queue */
11429 if (phba->sli4_hba.oas_wq != NULL) {
11430 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
11431 phba->sli4_hba.oas_wq = NULL;
11436 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
11438 static const struct pci_error_handlers lpfc_err_handler = {
11439 .error_detected = lpfc_io_error_detected,
11440 .slot_reset = lpfc_io_slot_reset,
11441 .resume = lpfc_io_resume,
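/*
 * Editor's note (illustrative): with these callbacks registered, a PCI
 * channel error flows through the AER core roughly as:
 *
 *	error_detected(state)	driver quiesces I/O; returns CAN_RECOVER,
 *	    |			NEED_RESET, or DISCONNECT
 *	    v
 *	slot_reset()		after bus reset: re-enable the device,
 *	    |			restore state, re-arm interrupts
 *	    v
 *	resume()		bring the HBA back online; I/O may flow again
 *
 * Each step dispatches to the SLI-3 or SLI-4 variant via pci_dev_grp.
 */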
11444 static struct pci_driver lpfc_driver = {
11445 .name = LPFC_DRIVER_NAME,
11446 .id_table = lpfc_id_table,
11447 .probe = lpfc_pci_probe_one,
11448 .remove = lpfc_pci_remove_one,
11449 .suspend = lpfc_pci_suspend_one,
11450 .resume = lpfc_pci_resume_one,
11451 .err_handler = &lpfc_err_handler,
11454 static const struct file_operations lpfc_mgmt_fop = {
11455 .owner = THIS_MODULE,
11458 static struct miscdevice lpfc_mgmt_dev = {
11459 .minor = MISC_DYNAMIC_MINOR,
11460 .name = "lpfcmgmt",
11461 .fops = &lpfc_mgmt_fop,
11465 * lpfc_init - lpfc module initialization routine
11467 * This routine is to be invoked when the lpfc module is loaded into the
11468 * kernel. The special kernel macro module_init() is used to indicate the
11469 * role of this routine to the kernel as lpfc module entry point.
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
11474 * all others - failed
11482 printk(LPFC_MODULE_DESC "\n");
11483 printk(LPFC_COPYRIGHT "\n");
11485 error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
11488 "misc_register returned with status %d", error);
11490 lpfc_transport_functions.vport_create = lpfc_vport_create;
11491 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
11492 lpfc_transport_template =
11493 fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
11496 lpfc_vport_transport_template =
11497 fc_attach_transport(&lpfc_vport_transport_functions);
11498 if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}
11503 /* Initialize in case vector mapping is needed */
11504 lpfc_used_cpu = NULL;
11505 lpfc_present_cpu = 0;
11506 for_each_present_cpu(cpu)
11507 lpfc_present_cpu++;
11509 error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
11519 * lpfc_exit - lpfc module removal routine
11521 * This routine is invoked when the lpfc module is removed from the kernel.
11522 * The special kernel macro module_exit() is used to indicate the role of
11523 * this routine to the kernel as lpfc module exit point.
11528 misc_deregister(&lpfc_mgmt_dev);
11529 pci_unregister_driver(&lpfc_driver);
11530 fc_release_transport(lpfc_transport_template);
11531 fc_release_transport(lpfc_vport_transport_template);
11532 if (_dump_buf_data) {
11533 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
11534 "_dump_buf_data at 0x%p\n",
11535 (1L << _dump_buf_data_order), _dump_buf_data);
11536 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
11539 if (_dump_buf_dif) {
11540 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
11541 "_dump_buf_dif at 0x%p\n",
11542 (1L << _dump_buf_dif_order), _dump_buf_dif);
11543 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
11545 kfree(lpfc_used_cpu);
11546 idr_destroy(&lpfc_hba_index);
11549 module_init(lpfc_init);
11550 module_exit(lpfc_exit);
11551 MODULE_LICENSE("GPL");
11552 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
11553 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
11554 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);