2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
14 #include "qla_devtbl.h"
20 #include <target/target_core_base.h>
21 #include "qla_target.h"
24 * QLogic ISP2x00 Hardware Support Function Prototypes.
26 static int qla2x00_isp_firmware(scsi_qla_host_t *);
27 static int qla2x00_setup_chip(scsi_qla_host_t *);
28 static int qla2x00_fw_ready(scsi_qla_host_t *);
29 static int qla2x00_configure_hba(scsi_qla_host_t *);
30 static int qla2x00_configure_loop(scsi_qla_host_t *);
31 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
32 static int qla2x00_configure_fabric(scsi_qla_host_t *);
33 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
34 static int qla2x00_restart_isp(scsi_qla_host_t *);
36 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37 static int qla84xx_init_chip(scsi_qla_host_t *);
38 static int qla25xx_init_queues(struct qla_hw_data *);
39 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
40 static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
42 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
45 /* SRB Extensions ---------------------------------------------------------- */
/*
 * qla2x00_sp_timeout - SRB timer expiry callback.
 *
 * Under hardware_lock, clears this SRB's slot in the base request
 * queue's outstanding_cmds[] so completion paths no longer see it.
 * NOTE(review): this extract is missing source lines (embedded
 * numbering has gaps) — the eventual call into iocb->timeout is not
 * visible here; confirm against the full file.
 */
48 qla2x00_sp_timeout(struct timer_list *t)
50 srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
51 struct srb_iocb *iocb;
52 scsi_qla_host_t *vha = sp->vha;
/* Serialize against the ISR/completion path. */
56 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
57 req = vha->hw->req_q_map[0];
58 req->outstanding_cmds[sp->handle] = NULL;
59 iocb = &sp->u.iocb_cmd;
62 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
/*
 * qla2x00_sp_free - release an SRB's resources.
 * Stops the embedded iocb timer; remaining teardown (freeing the SRB
 * itself) is on lines not present in this extract — TODO confirm.
 */
66 qla2x00_sp_free(void *ptr)
69 struct srb_iocb *iocb = &sp->u.iocb_cmd;
71 del_timer(&iocb->timer);
75 /* Asynchronous Login/Logout Routines -------------------------------------- */
/*
 * qla2x00_get_async_timeout - compute the timeout (seconds) used for
 * async login/logout/ADISC IOCBs.
 *
 * Default is twice the switch-negotiated R_A_TOV (r_a_tov is held in
 * tenths of a second, hence /10). ISPFx00 parts use a fixed default,
 * and pre-FWI2 ISPs fall back to the login timeout seeded from the
 * initialization control block.
 */
78 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
81 struct qla_hw_data *ha = vha->hw;
83 /* Firmware should use switch negotiated r_a_tov for timeout. */
84 tmo = ha->r_a_tov / 10 * 2;
86 tmo = FX00_DEF_RATOV * 2;
87 } else if (!IS_FWI2_CAPABLE(ha)) {
89 * Except for earlier ISPs where the timeout is seeded from the
90 * initialization control block.
92 tmo = ha->login_timeout;
/*
 * qla2x00_async_iocb_timeout - timeout handler shared by the async
 * logio-style IOCBs (login, logout, CT passthrough, ...).
 *
 * Clears FCF_ASYNC_SENT and dispatches per SRB type: a timed-out PLOGI
 * is reported through the PLOGI-done event path (so the state machine
 * can retry), a timed-out LOGO completes via the target-mode handler,
 * and other types fall through to sp->done() with
 * QLA_FUNCTION_TIMEOUT. NOTE(review): the switch(sp->type) framing is
 * among the lines missing from this extract.
 */
98 qla2x00_async_iocb_timeout(void *data)
101 fc_port_t *fcport = sp->fcport;
102 struct srb_iocb *lio = &sp->u.iocb_cmd;
105 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
106 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
107 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
109 fcport->flags &= ~FCF_ASYNC_SENT;
113 /* Retry as needed. */
114 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
115 lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
116 QLA_LOGIO_LOGIN_RETRIED : 0;
/* Synthesize a PLOGI-done event carrying the error status. */
117 memset(&ea, 0, sizeof(ea));
118 ea.event = FCME_PLOGI_DONE;
119 ea.fcport = sp->fcport;
120 ea.data[0] = lio->u.logio.data[0];
121 ea.data[1] = lio->u.logio.data[1];
123 qla24xx_handle_plogi_done_event(fcport->vha, &ea);
126 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
128 case SRB_CT_PTHRU_CMD:
133 sp->done(sp, QLA_FUNCTION_TIMEOUT);
/*
 * qla2x00_async_login_sp_done - completion callback for an async PLOGI
 * SRB.
 *
 * Clears FCF_ASYNC_SENT and, unless the host is unloading, forwards
 * the mailbox/IOP status to the fcport state machine as an
 * FCME_PLOGI_DONE event. SRB cleanup lines are missing from this
 * extract.
 */
139 qla2x00_async_login_sp_done(void *ptr, int res)
142 struct scsi_qla_host *vha = sp->vha;
143 struct srb_iocb *lio = &sp->u.iocb_cmd;
146 ql_dbg(ql_dbg_disc, vha, 0x20dd,
147 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
149 sp->fcport->flags &= ~FCF_ASYNC_SENT;
150 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
151 memset(&ea, 0, sizeof(ea));
152 ea.event = FCME_PLOGI_DONE;
153 ea.fcport = sp->fcport;
154 ea.data[0] = lio->u.logio.data[0];
155 ea.data[1] = lio->u.logio.data[1];
156 ea.iop[0] = lio->u.logio.iop[0];
157 ea.iop[1] = lio->u.logio.iop[1];
159 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla2x00_async_login - issue an asynchronous PLOGI to @fcport.
 *
 * Bails out if the host is offline or firmware already has a login in
 * progress/complete for this port. Allocates an SRB, arms the async
 * timeout, and starts the IOCB; on start failure the port is flagged
 * for relogin via the DPC thread. Returns QLA_SUCCESS or an error
 * (error-path labels are among the lines missing from this extract).
 */
166 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
170 struct srb_iocb *lio;
171 int rval = QLA_FUNCTION_FAILED;
173 if (!vha->flags.online)
/* Don't race a login the firmware is already driving. */
176 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
177 (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
178 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
181 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
185 fcport->flags |= FCF_ASYNC_SENT;
186 fcport->logout_completed = 0;
188 sp->type = SRB_LOGIN_CMD;
/* +2s of slack over the negotiated async timeout. */
190 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
192 lio = &sp->u.iocb_cmd;
193 lio->timeout = qla2x00_async_iocb_timeout;
194 sp->done = qla2x00_async_login_sp_done;
195 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
/* NVMe ports do their (NVMe) PRLI separately — skip FCP PRLI here. */
197 if (fcport->fc4f_nvme)
198 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
200 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
201 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
202 rval = qla2x00_start_sp(sp);
203 if (rval != QLA_SUCCESS) {
204 fcport->flags &= ~FCF_ASYNC_SENT;
205 fcport->flags |= FCF_LOGIN_NEEDED;
206 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
210 ql_dbg(ql_dbg_disc, vha, 0x2072,
211 "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
212 "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
213 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
214 fcport->login_retry);
220 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla2x00_async_logout_sp_done - completion callback for an async LOGO
 * SRB; queues the logout-done work unless the host is unloading.
 */
225 qla2x00_async_logout_sp_done(void *ptr, int res)
228 struct srb_iocb *lio = &sp->u.iocb_cmd;
230 sp->fcport->flags &= ~FCF_ASYNC_SENT;
231 if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
232 qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
/*
 * qla2x00_async_logout - issue an asynchronous LOGO to @fcport.
 *
 * Same SRB/timer/start pattern as qla2x00_async_login; on any failure
 * path FCF_ASYNC_SENT is cleared again (labels missing from this
 * extract).
 */
238 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
241 struct srb_iocb *lio;
244 rval = QLA_FUNCTION_FAILED;
245 fcport->flags |= FCF_ASYNC_SENT;
246 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
250 sp->type = SRB_LOGOUT_CMD;
252 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
254 lio = &sp->u.iocb_cmd;
255 lio->timeout = qla2x00_async_iocb_timeout;
256 sp->done = qla2x00_async_logout_sp_done;
257 rval = qla2x00_start_sp(sp);
258 if (rval != QLA_SUCCESS)
261 ql_dbg(ql_dbg_disc, vha, 0x2070,
262 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
263 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
264 fcport->d_id.b.area, fcport->d_id.b.al_pa,
271 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla2x00_async_adisc_sp_done - completion callback for an async ADISC
 * SRB; queues the adisc-done work unless the host is unloading.
 */
276 qla2x00_async_adisc_sp_done(void *ptr, int res)
279 struct scsi_qla_host *vha = sp->vha;
280 struct srb_iocb *lio = &sp->u.iocb_cmd;
282 if (!test_bit(UNLOADING, &vha->dpc_flags))
283 qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
/*
 * qla2x00_async_adisc - issue an asynchronous ADISC (address discovery)
 * to @fcport, used to re-validate an existing login.
 *
 * Mirrors the async login/logout SRB setup; data[1] carries the
 * retried flag from the caller.
 */
289 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
293 struct srb_iocb *lio;
296 rval = QLA_FUNCTION_FAILED;
297 fcport->flags |= FCF_ASYNC_SENT;
298 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
302 sp->type = SRB_ADISC_CMD;
304 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
306 lio = &sp->u.iocb_cmd;
307 lio->timeout = qla2x00_async_iocb_timeout;
308 sp->done = qla2x00_async_adisc_sp_done;
309 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
310 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
311 rval = qla2x00_start_sp(sp);
312 if (rval != QLA_SUCCESS)
315 ql_dbg(ql_dbg_disc, vha, 0x206f,
316 "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
317 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
318 fcport->d_id.b.area, fcport->d_id.b.al_pa);
324 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_handle_gnl_done_event - process the result of a Get Name
 * List (GNL) command for @ea->fcport.
 *
 * Walks the firmware's name-list entries looking for this port's WWPN.
 * If found: validates N_Port ID / loop id against our record (schedule
 * session deletion on mismatch), adopts the firmware loop id, resolves
 * loop-id/nport-id conflicts with other fcports, then branches on the
 * firmware login state (PRLI complete -> GPDB; unavailable -> allocate
 * a loop id and re-drive login). If not found, the firmware has no
 * record: allocate a loop id, make sure no other fcport owns the same
 * N_Port ID or handle, and re-drive login.
 *
 * NOTE(review): many lines (braces, breaks, early returns) are missing
 * from this extract; in particular the second loop reads port_id bytes
 * in the opposite order ([0]/[1]/[2] for domain/area/al_pa) from the
 * first loop ([2]/[1]/[0]) — looks like a byte-order inconsistency,
 * but confirm against the full source before treating it as a bug.
 */
328 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
329 struct event_arg *ea)
331 fc_port_t *fcport, *conflict_fcport;
332 struct get_name_list_extended *e;
333 u16 i, n, found = 0, loop_id;
336 u8 opt = 0, current_login_state;
/* GNL itself failed: reset the retry budget and bail. */
340 if (ea->rc) { /* rval */
341 if (fcport->login_retry == 0) {
342 fcport->login_retry = vha->hw->login_retry_count;
343 ql_dbg(ql_dbg_disc, vha, 0x20de,
344 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
345 fcport->port_name, fcport->login_retry);
/* State changed while GNL was in flight — restart discovery. */
350 if (fcport->last_rscn_gen != fcport->rscn_gen) {
351 ql_dbg(ql_dbg_disc, vha, 0x20df,
352 "%s %8phC rscn gen changed rscn %d|%d \n",
353 __func__, fcport->port_name,
354 fcport->last_rscn_gen, fcport->rscn_gen);
355 qla24xx_post_gidpn_work(vha, fcport);
357 } else if (fcport->last_login_gen != fcport->login_gen) {
358 ql_dbg(ql_dbg_disc, vha, 0x20e0,
359 "%s %8phC login gen changed login %d|%d\n",
360 __func__, fcport->port_name,
361 fcport->last_login_gen, fcport->login_gen);
/* ea->data[0] is the byte count transferred by the firmware. */
365 n = ea->data[0] / sizeof(struct get_name_list_extended);
367 ql_dbg(ql_dbg_disc, vha, 0x20e1,
368 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
369 __func__, __LINE__, fcport->port_name, n,
370 fcport->d_id.b.domain, fcport->d_id.b.area,
371 fcport->d_id.b.al_pa, fcport->loop_id);
/* Pass 1: look for our WWPN in the firmware's list. */
373 for (i = 0; i < n; i++) {
375 wwn = wwn_to_u64(e->port_name);
377 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
381 id.b.domain = e->port_id[2];
382 id.b.area = e->port_id[1];
383 id.b.al_pa = e->port_id[0];
/* Mask out the reserved bit of the firmware handle. */
386 loop_id = le16_to_cpu(e->nport_handle);
387 loop_id = (loop_id & 0x7fff);
389 ql_dbg(ql_dbg_disc, vha, 0x20e2,
390 "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
391 __func__, fcport->port_name,
392 e->current_login_state, fcport->fw_login_state,
393 id.b.domain, id.b.area, id.b.al_pa,
394 fcport->d_id.b.domain, fcport->d_id.b.area,
395 fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
/* Port moved or handle changed — tear the session down. */
397 if ((id.b24 != fcport->d_id.b24) ||
398 ((fcport->loop_id != FC_NO_LOOP_ID) &&
399 (fcport->loop_id != loop_id))) {
400 ql_dbg(ql_dbg_disc, vha, 0x20e3,
401 "%s %d %8phC post del sess\n",
402 __func__, __LINE__, fcport->port_name);
403 qlt_schedule_sess_for_deletion(fcport, 1);
407 fcport->loop_id = loop_id;
409 wwn = wwn_to_u64(fcport->port_name);
410 qlt_find_sess_invalidate_other(vha, wwn,
411 id, loop_id, &conflict_fcport);
413 if (conflict_fcport) {
415 * Another share fcport share the same loop_id &
416 * nport id. Conflict fcport needs to finish
417 * cleanup before this fcport can proceed to login.
419 conflict_fcport->conflict = fcport;
420 fcport->login_pause = 1;
/* High nibble = NVMe login state, low nibble = FCP. */
423 if (fcport->fc4f_nvme)
424 current_login_state = e->current_login_state >> 4;
426 current_login_state = e->current_login_state & 0xf;
428 switch (current_login_state) {
429 case DSC_LS_PRLI_COMP:
430 ql_dbg(ql_dbg_disc, vha, 0x20e4,
431 "%s %d %8phC post gpdb\n",
432 __func__, __LINE__, fcport->port_name);
433 opt = PDO_FORCE_ADISC;
434 qla24xx_post_gpdb_work(vha, fcport, opt);
436 case DSC_LS_PORT_UNAVAIL:
438 if (fcport->loop_id == FC_NO_LOOP_ID) {
439 qla2x00_find_new_loop_id(vha, fcport);
440 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
442 ql_dbg(ql_dbg_disc, vha, 0x20e5,
444 __func__, __LINE__, fcport->port_name);
445 qla24xx_fcport_handle_login(vha, fcport);
451 /* fw has no record of this port */
452 if (fcport->loop_id == FC_NO_LOOP_ID) {
453 qla2x00_find_new_loop_id(vha, fcport);
454 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
/* Pass 2: make sure no other port owns our N_Port ID / handle. */
456 for (i = 0; i < n; i++) {
458 id.b.domain = e->port_id[0];
459 id.b.area = e->port_id[1];
460 id.b.al_pa = e->port_id[2];
462 loop_id = le16_to_cpu(e->nport_handle);
464 if (fcport->d_id.b24 == id.b24) {
466 qla2x00_find_fcport_by_wwpn(vha,
469 ql_dbg(ql_dbg_disc, vha, 0x20e6,
470 "%s %d %8phC post del sess\n",
472 conflict_fcport->port_name);
473 qlt_schedule_sess_for_deletion
474 (conflict_fcport, 1);
477 if (fcport->loop_id == loop_id) {
478 /* FW already picked this loop id for another fcport */
479 qla2x00_find_new_loop_id(vha, fcport);
483 qla24xx_fcport_handle_login(vha, fcport);
/*
 * qla24xx_async_gnl_sp_done - completion callback for the GNL mailbox
 * SRB.
 *
 * Computes the number of returned name-list entries from mb[1] (bytes
 * transferred), marks each returned handle in loop_id_map, then (under
 * tgt.sess_lock) drains vha->gnl.fcports onto a local list and fires
 * FCME_GNL_DONE for each waiting fcport. Splicing first lets multiple
 * ports share a single GNL command.
 */
488 qla24xx_async_gnl_sp_done(void *s, int res)
491 struct scsi_qla_host *vha = sp->vha;
493 struct fc_port *fcport = NULL, *tf;
494 u16 i, n = 0, loop_id;
496 struct get_name_list_extended *e;
500 ql_dbg(ql_dbg_disc, vha, 0x20e7,
501 "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
502 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
503 sp->u.iocb_cmd.u.mbx.in_mb[2]);
505 memset(&ea, 0, sizeof(ea));
508 ea.event = FCME_GNL_DONE;
510 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
511 sizeof(struct get_name_list_extended)) {
512 n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
513 sizeof(struct get_name_list_extended);
514 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
/* Record every firmware-owned handle so allocation avoids them. */
517 for (i = 0; i < n; i++) {
519 loop_id = le16_to_cpu(e->nport_handle);
520 /* mask out reserve bit */
521 loop_id = (loop_id & 0x7fff);
522 set_bit(loop_id, vha->hw->loop_id_map);
523 wwn = wwn_to_u64(e->port_name);
525 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
526 "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
527 __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
528 e->port_id[0], e->current_login_state, e->last_login_state,
532 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
537 if (!list_empty(&vha->gnl.fcports))
538 list_splice_init(&vha->gnl.fcports, &h);
540 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
541 list_del_init(&fcport->gnl_entry);
542 fcport->flags &= ~FCF_ASYNC_SENT;
545 qla2x00_fcport_event_handler(vha, &ea);
548 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/*
 * qla24xx_async_gnl - issue an async Get Name List mailbox command.
 *
 * Queues @fcport on vha->gnl.fcports under tgt.sess_lock; if a GNL is
 * already in flight the second unlock path returns early and the port
 * simply piggybacks on the pending command (the early-return lines are
 * missing from this extract — confirm). Otherwise builds an
 * MBC_PORT_NODE_NAME_LIST mailbox SRB pointing at the preallocated
 * vha->gnl DMA buffer and starts it.
 */
553 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
556 struct srb_iocb *mbx;
557 int rval = QLA_FUNCTION_FAILED;
561 if (!vha->flags.online)
564 ql_dbg(ql_dbg_disc, vha, 0x20d9,
565 "Async-gnlist WWPN %8phC \n", fcport->port_name);
567 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
568 fcport->flags |= FCF_ASYNC_SENT;
569 fcport->disc_state = DSC_GNL;
570 fcport->last_rscn_gen = fcport->rscn_gen;
571 fcport->last_login_gen = fcport->login_gen;
573 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
575 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
580 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
582 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
585 sp->type = SRB_MB_IOCB;
/* Snapshot generations so completion can detect staleness. */
587 sp->gen1 = fcport->rscn_gen;
588 sp->gen2 = fcport->login_gen;
590 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
/* MBC_PORT_NODE_NAME_LIST with 64-bit DMA address of the gnl buffer. */
592 mb = sp->u.iocb_cmd.u.mbx.out_mb;
593 mb[0] = MBC_PORT_NODE_NAME_LIST;
594 mb[1] = BIT_2 | BIT_3;
595 mb[2] = MSW(vha->gnl.ldma);
596 mb[3] = LSW(vha->gnl.ldma);
597 mb[6] = MSW(MSD(vha->gnl.ldma));
598 mb[7] = LSW(MSD(vha->gnl.ldma));
599 mb[8] = vha->gnl.size;
602 mbx = &sp->u.iocb_cmd;
603 mbx->timeout = qla2x00_async_iocb_timeout;
605 sp->done = qla24xx_async_gnl_sp_done;
607 rval = qla2x00_start_sp(sp);
608 if (rval != QLA_SUCCESS)
611 ql_dbg(ql_dbg_disc, vha, 0x20da,
612 "Async-%s - OUT WWPN %8phC hndl %x\n",
613 sp->name, fcport->port_name, sp->handle);
620 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gnl_work - queue a QLA_EVT_GNL work item for @fcport so
 * the DPC thread issues the GNL in process context.
 */
624 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
626 struct qla_work_evt *e;
628 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
630 return QLA_FUNCTION_FAILED;
632 e->u.fcport.fcport = fcport;
633 return qla2x00_post_work(vha, e);
/*
 * qla24xx_async_gpdb_sp_done - completion callback for the Get Port
 * Database mailbox SRB.
 *
 * Parses the returned port database into the fcport, fires
 * FCME_GPDB_DONE, and frees the DMA-pool buffer allocated by
 * qla24xx_async_gpdb(). Error-status checks between the debug print
 * and the parse are missing from this extract.
 */
637 void qla24xx_async_gpdb_sp_done(void *s, int res)
640 struct scsi_qla_host *vha = sp->vha;
641 struct qla_hw_data *ha = vha->hw;
642 struct port_database_24xx *pd;
643 fc_port_t *fcport = sp->fcport;
644 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
645 int rval = QLA_SUCCESS;
648 ql_dbg(ql_dbg_disc, vha, 0x20db,
649 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
650 sp->name, res, fcport->port_name, mb[1], mb[2]);
652 fcport->flags &= ~FCF_ASYNC_SENT;
659 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
661 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
664 memset(&ea, 0, sizeof(ea));
665 ea.event = FCME_GPDB_DONE;
670 qla2x00_fcport_event_handler(vha, &ea);
/* Buffer was dma_pool-allocated in qla24xx_async_gpdb(). */
672 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
673 sp->u.iocb_cmd.u.mbx.in_dma);
/*
 * qla24xx_post_prli_work - queue a QLA_EVT_PRLI work item for @fcport.
 */
678 static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
680 struct qla_work_evt *e;
682 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
684 return QLA_FUNCTION_FAILED;
686 e->u.fcport.fcport = fcport;
688 return qla2x00_post_work(vha, e);
/*
 * qla2x00_async_prli_sp_done - completion callback for an async PRLI
 * SRB; forwards mailbox/IOP status as an FCME_PRLI_DONE event unless
 * the host is unloading.
 */
692 qla2x00_async_prli_sp_done(void *ptr, int res)
695 struct scsi_qla_host *vha = sp->vha;
696 struct srb_iocb *lio = &sp->u.iocb_cmd;
699 ql_dbg(ql_dbg_disc, vha, 0x2129,
700 "%s %8phC res %d \n", __func__,
701 sp->fcport->port_name, res);
703 sp->fcport->flags &= ~FCF_ASYNC_SENT;
705 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
706 memset(&ea, 0, sizeof(ea));
707 ea.event = FCME_PRLI_DONE;
708 ea.fcport = sp->fcport;
709 ea.data[0] = lio->u.logio.data[0];
710 ea.data[1] = lio->u.logio.data[1];
711 ea.iop[0] = lio->u.logio.iop[0];
712 ea.iop[1] = lio->u.logio.iop[1];
715 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_prli - issue an asynchronous PRLI to @fcport.
 *
 * Same guard conditions and SRB setup as qla2x00_async_login; sets
 * SRB_LOGIN_NVME_PRLI when the port negotiated the NVMe FC-4 type.
 */
722 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
725 struct srb_iocb *lio;
726 int rval = QLA_FUNCTION_FAILED;
728 if (!vha->flags.online)
731 if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
732 fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
733 fcport->fw_login_state == DSC_LS_PRLI_PEND)
736 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
740 fcport->flags |= FCF_ASYNC_SENT;
741 fcport->logout_completed = 0;
743 sp->type = SRB_PRLI_CMD;
745 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
747 lio = &sp->u.iocb_cmd;
748 lio->timeout = qla2x00_async_iocb_timeout;
749 sp->done = qla2x00_async_prli_sp_done;
750 lio->u.logio.flags = 0;
752 if (fcport->fc4f_nvme)
753 lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
755 rval = qla2x00_start_sp(sp);
756 if (rval != QLA_SUCCESS) {
757 fcport->flags &= ~FCF_ASYNC_SENT;
758 fcport->flags |= FCF_LOGIN_NEEDED;
759 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
763 ql_dbg(ql_dbg_disc, vha, 0x211b,
764 "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
765 fcport->port_name, sp->handle, fcport->loop_id,
766 fcport->d_id.b24, fcport->login_retry);
772 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gpdb_work - queue a QLA_EVT_GPDB work item; @opt is the
 * port-database option byte (e.g. PDO_FORCE_ADISC) passed through to
 * the mailbox command.
 */
776 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
778 struct qla_work_evt *e;
780 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
782 return QLA_FUNCTION_FAILED;
784 e->u.fcport.fcport = fcport;
785 e->u.fcport.opt = opt;
786 return qla2x00_post_work(vha, e);
/*
 * qla24xx_async_gpdb - issue an async MBC_GET_PORT_DATABASE for
 * @fcport.
 *
 * Allocates a zeroed port_database_24xx from the s_dma_pool, points
 * the mailbox registers at it, and starts the SRB; the completion
 * callback frees the buffer. On the failure path the buffer is freed
 * here and the GPDB is re-posted as work (labels missing from this
 * extract).
 */
789 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
792 struct srb_iocb *mbx;
793 int rval = QLA_FUNCTION_FAILED;
796 struct port_database_24xx *pd;
797 struct qla_hw_data *ha = vha->hw;
799 if (!vha->flags.online)
802 fcport->flags |= FCF_ASYNC_SENT;
803 fcport->disc_state = DSC_GPDB;
805 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
809 sp->type = SRB_MB_IOCB;
811 sp->gen1 = fcport->rscn_gen;
812 sp->gen2 = fcport->login_gen;
813 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
815 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
817 ql_log(ql_log_warn, vha, 0xd043,
818 "Failed to allocate port database structure.\n");
822 mb = sp->u.iocb_cmd.u.mbx.out_mb;
823 mb[0] = MBC_GET_PORT_DATABASE;
824 mb[1] = fcport->loop_id;
827 mb[6] = MSW(MSD(pd_dma));
828 mb[7] = LSW(MSD(pd_dma));
832 mbx = &sp->u.iocb_cmd;
833 mbx->timeout = qla2x00_async_iocb_timeout;
/* Stash buffer + DMA handle so the done callback can free them. */
834 mbx->u.mbx.in = (void *)pd;
835 mbx->u.mbx.in_dma = pd_dma;
837 sp->done = qla24xx_async_gpdb_sp_done;
839 rval = qla2x00_start_sp(sp);
840 if (rval != QLA_SUCCESS)
843 ql_dbg(ql_dbg_disc, vha, 0x20dc,
844 "Async-%s %8phC hndl %x opt %x\n",
845 sp->name, fcport->port_name, sp->handle, opt);
851 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
855 fcport->flags &= ~FCF_ASYNC_SENT;
856 qla24xx_post_gpdb_work(vha, fcport, opt);
/*
 * qla24xx_handle_gpdb_event - act on a completed Get Port Database.
 *
 * Stale generations restart discovery (GIDPN); a GPDB failure tears
 * the session down. On success, under tgt.sess_lock, the port is
 * marked alive and — on first successful login — registered with the
 * upper layers either directly (upd_fcport) or after a GPSC query when
 * the adapter supports iIDMA+GPSC. A later GPDB for an
 * already-logged-in port just revalidates the session.
 */
861 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
864 fc_port_t *fcport = ea->fcport;
866 u16 opt = ea->sp->u.iocb_cmd.u.mbx.out_mb[10];
868 fcport->flags &= ~FCF_ASYNC_SENT;
870 ql_dbg(ql_dbg_disc, vha, 0x20d2,
871 "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
872 fcport->disc_state, fcport->fw_login_state, rval);
874 if (ea->sp->gen2 != fcport->login_gen) {
875 /* target side must have changed it. */
876 ql_dbg(ql_dbg_disc, vha, 0x20d3,
877 "%s %8phC generation changed rscn %d|%d login %d|%d \n",
878 __func__, fcport->port_name, fcport->last_rscn_gen,
879 fcport->rscn_gen, fcport->last_login_gen,
882 } else if (ea->sp->gen1 != fcport->rscn_gen) {
883 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
884 __func__, __LINE__, fcport->port_name);
885 qla24xx_post_gidpn_work(vha, fcport);
889 if (rval != QLA_SUCCESS) {
890 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
891 __func__, __LINE__, fcport->port_name);
892 qlt_schedule_sess_for_deletion_lock(fcport);
896 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* ADISC revalidation does not count as a new login. */
897 if (opt != PDO_FORCE_ADISC)
898 ea->fcport->login_gen++;
899 ea->fcport->deleted = 0;
900 ea->fcport->logout_on_delete = 1;
902 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
904 ea->fcport->login_succ = 1;
906 if (!IS_IIDMA_CAPABLE(vha->hw) ||
907 !vha->hw->flags.gpsc_supported) {
908 ql_dbg(ql_dbg_disc, vha, 0x20d6,
909 "%s %d %8phC post upd_fcport fcp_cnt %d\n",
910 __func__, __LINE__, fcport->port_name,
913 qla24xx_post_upd_fcport_work(vha, fcport);
915 ql_dbg(ql_dbg_disc, vha, 0x20d7,
916 "%s %d %8phC post gpsc fcp_cnt %d\n",
917 __func__, __LINE__, fcport->port_name,
920 qla24xx_post_gpsc_work(vha, fcport);
922 } else if (ea->fcport->login_succ) {
924 * We have an existing session. A late RSCN delivery
925 * must have triggered the session to be re-validate.
926 * session is still valid.
928 ql_dbg(ql_dbg_disc, vha, 0x20d6,
929 "%s %d %8phC session revalidate success\n",
930 __func__, __LINE__, fcport->port_name);
931 fcport->disc_state = DSC_LOGIN_COMPLETE;
933 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/*
 * qla24xx_fcport_handle_login - main per-port login state machine
 * step.
 *
 * Guards: retry budget exhausted, port not found by the last scan,
 * firmware already mid-login, PLOGI-NACK backoff not expired, pure
 * target mode, or an async op already outstanding — all defer or skip.
 * Otherwise dispatches on disc_state: no loop id -> GNL; deleted with
 * login paused -> remember generations and defer; FCP2 device -> GPDB
 * with forced ADISC; login failed -> GIDPN; login complete -> GPDB
 * recheck. (Several case labels/breaks are missing from this extract.)
 */
936 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
938 if (fcport->login_retry == 0)
941 if (fcport->scan_state != QLA_FCPORT_FOUND)
944 ql_dbg(ql_dbg_disc, vha, 0x20d8,
945 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
946 __func__, fcport->port_name, fcport->disc_state,
947 fcport->fw_login_state, fcport->login_pause, fcport->flags,
948 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
949 fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
952 fcport->login_retry--;
954 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
955 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
/* Wait out the PLOGI-NACK backoff window before retrying. */
958 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
959 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
963 /* for pure Target Mode. Login will not be initiated */
964 if (vha->host->active_mode == MODE_TARGET)
967 if (fcport->flags & FCF_ASYNC_SENT) {
968 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
972 switch (fcport->disc_state) {
974 if (fcport->loop_id == FC_NO_LOOP_ID) {
975 ql_dbg(ql_dbg_disc, vha, 0x20bd,
976 "%s %d %8phC post gnl\n",
977 __func__, __LINE__, fcport->port_name);
978 qla24xx_post_gnl_work(vha, fcport);
980 ql_dbg(ql_dbg_disc, vha, 0x20bf,
981 "%s %d %8phC post login\n",
982 __func__, __LINE__, fcport->port_name);
983 fcport->disc_state = DSC_LOGIN_PEND;
984 qla2x00_post_async_login_work(vha, fcport, NULL);
989 if (fcport->login_pause) {
990 fcport->last_rscn_gen = fcport->rscn_gen;
991 fcport->last_login_gen = fcport->login_gen;
992 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
996 if (fcport->flags & FCF_FCP2_DEVICE) {
997 u8 opt = PDO_FORCE_ADISC;
999 ql_dbg(ql_dbg_disc, vha, 0x20c9,
1000 "%s %d %8phC post gpdb\n",
1001 __func__, __LINE__, fcport->port_name);
1003 fcport->disc_state = DSC_GPDB;
1004 qla24xx_post_gpdb_work(vha, fcport, opt);
1006 ql_dbg(ql_dbg_disc, vha, 0x20cf,
1007 "%s %d %8phC post login\n",
1008 __func__, __LINE__, fcport->port_name);
1009 fcport->disc_state = DSC_LOGIN_PEND;
1010 qla2x00_post_async_login_work(vha, fcport, NULL);
1015 case DSC_LOGIN_FAILED:
1016 ql_dbg(ql_dbg_disc, vha, 0x20d0,
1017 "%s %d %8phC post gidpn\n",
1018 __func__, __LINE__, fcport->port_name);
1020 qla24xx_post_gidpn_work(vha, fcport);
1023 case DSC_LOGIN_COMPLETE:
1024 /* recheck login state */
1025 ql_dbg(ql_dbg_disc, vha, 0x20d1,
1026 "%s %d %8phC post gpdb\n",
1027 __func__, __LINE__, fcport->port_name);
1029 qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
/*
 * qla24xx_handle_rscn_event - per-port RSCN reaction: if no async op
 * is outstanding and the port is in a state worth revalidating,
 * re-query its N_Port ID via GPN_ID.
 */
1040 void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
1044 ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
1045 "%s %8phC DS %d LS %d\n",
1046 __func__, fcport->port_name, fcport->disc_state,
1047 fcport->fw_login_state);
1049 if (fcport->flags & FCF_ASYNC_SENT)
1052 switch (fcport->disc_state) {
1054 case DSC_LOGIN_COMPLETE:
1055 qla24xx_post_gpnid_work(fcport->vha, &ea->id);
/*
 * qla24xx_post_newsess_work - queue a QLA_EVT_NEW_SESS work item
 * carrying the new session's N_Port ID, WWPN, and (target-mode)
 * pending-login context @pla.
 */
1062 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1063 u8 *port_name, void *pla)
1065 struct qla_work_evt *e;
1066 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1068 return QLA_FUNCTION_FAILED;
1070 e->u.new_sess.id = *id;
1071 e->u.new_sess.pla = pla;
1072 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1074 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_delete_done_event - after a session delete finishes,
 * re-drive login for the port in initiator mode (if the scan still
 * sees it). Other active_mode cases are missing from this extract.
 */
1078 int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
1079 struct event_arg *ea)
1081 fc_port_t *fcport = ea->fcport;
1083 if (test_bit(UNLOADING, &vha->dpc_flags))
1086 switch (vha->host->active_mode) {
1087 case MODE_INITIATOR:
1089 if (fcport->scan_state == QLA_FCPORT_FOUND)
1090 qla24xx_fcport_handle_login(vha, fcport);
/*
 * qla24xx_handle_relogin_event - DPC-driven relogin attempt.
 *
 * Defers (and restores the retry it would have consumed) when the port
 * vanished from the scan, firmware is mid-login, the PLOGI-NACK
 * backoff is active, an async op is outstanding, or a delete is
 * pending. A changed RSCN generation restarts discovery via GIDPN;
 * otherwise falls through to the login state machine.
 */
1103 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1104 struct event_arg *ea)
1106 fc_port_t *fcport = ea->fcport;
1108 if (fcport->scan_state != QLA_FCPORT_FOUND) {
1109 fcport->login_retry++;
1113 ql_dbg(ql_dbg_disc, vha, 0x2102,
1114 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1115 __func__, fcport->port_name, fcport->disc_state,
1116 fcport->fw_login_state, fcport->login_pause,
1117 fcport->deleted, fcport->conflict,
1118 fcport->last_rscn_gen, fcport->rscn_gen,
1119 fcport->last_login_gen, fcport->login_gen,
1122 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1123 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
1126 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1127 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
1131 if (fcport->flags & FCF_ASYNC_SENT) {
1132 fcport->login_retry++;
1133 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1137 if (fcport->disc_state == DSC_DELETE_PEND) {
1138 fcport->login_retry++;
1142 if (fcport->last_rscn_gen != fcport->rscn_gen) {
1143 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
1144 __func__, __LINE__, fcport->port_name);
1146 qla24xx_post_gidpn_work(vha, fcport);
1150 qla24xx_fcport_handle_login(vha, fcport);
/*
 * qla2x00_fcport_event_handler - central dispatcher for FCME_* fcport
 * state-machine events.
 *
 * First filters out name-server-query completions while a loop resync
 * is pending/active, then dispatches. RSCNs are handled per address
 * format: single-port RSCNs go to GPN_ID (unknown port) or the
 * per-port RSCN handler; area/domain RSCNs fan out to every fcport
 * matching the masked address; fabric-wide RSCNs mark all devices lost
 * and schedule a full loop resync. (Mask assignments and break
 * statements are among the lines missing from this extract.)
 */
1153 void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
1155 fc_port_t *fcport, *f, *tf;
1156 uint32_t id = 0, mask, rid;
1159 switch (ea->event) {
1162 case FCME_GIDPN_DONE:
1163 case FCME_GPSC_DONE:
1164 case FCME_GPNID_DONE:
/* Drop stale NS-query results while the loop is being resynced. */
1165 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
1166 test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
1173 switch (ea->event) {
1175 if (test_bit(UNLOADING, &vha->dpc_flags))
1178 qla24xx_handle_relogin_event(vha, ea);
1181 if (test_bit(UNLOADING, &vha->dpc_flags))
1183 switch (ea->id.b.rsvd_1) {
1184 case RSCN_PORT_ADDR:
1185 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1188 rc = qla24xx_post_gpnid_work(vha, &ea->id);
1190 ql_log(ql_log_warn, vha, 0xd044,
1191 "RSCN GPNID work failed %02x%02x%02x\n",
1192 ea->id.b.domain, ea->id.b.area,
1196 ea->fcport = fcport;
1197 qla24xx_handle_rscn_event(fcport, ea);
1200 case RSCN_AREA_ADDR:
1202 if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
1204 ql_dbg(ql_dbg_async, vha, 0x5044,
1205 "RSCN: Area 0x%06x was affected\n",
1209 ql_dbg(ql_dbg_async, vha, 0x507a,
1210 "RSCN: Domain 0x%06x was affected\n",
/* Fan out to every fcport within the masked address range. */
1214 rid = ea->id.b24 & mask;
1215 list_for_each_entry_safe(f, tf, &vha->vp_fcports,
1217 id = f->d_id.b24 & mask;
1220 qla24xx_handle_rscn_event(f, ea);
1226 ql_log(ql_log_warn, vha, 0xd045,
1227 "RSCN: Fabric was affected. Addr format %d\n",
1229 qla2x00_mark_all_devices_lost(vha, 1);
1230 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1231 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1234 case FCME_GIDPN_DONE:
1235 qla24xx_handle_gidpn_event(vha, ea);
1238 qla24xx_handle_gnl_done_event(vha, ea);
1240 case FCME_GPSC_DONE:
1241 qla24xx_post_upd_fcport_work(vha, ea->fcport);
1243 case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
1244 qla24xx_handle_plogi_done_event(vha, ea);
1246 case FCME_PRLI_DONE:
1247 qla24xx_handle_prli_done_event(vha, ea);
1249 case FCME_GPDB_DONE:
1250 qla24xx_handle_gpdb_event(vha, ea);
1252 case FCME_GPNID_DONE:
1253 qla24xx_handle_gpnid_event(vha, ea);
1255 case FCME_GFFID_DONE:
1256 qla24xx_handle_gffid_event(vha, ea);
1258 case FCME_DELETE_DONE:
1259 qla24xx_handle_delete_done_event(vha, ea);
/*
 * qla2x00_tmf_iocb_timeout - TMF timer expiry: record CS_TIMEOUT and
 * wake the waiter in qla2x00_async_tm_cmd().
 */
1268 qla2x00_tmf_iocb_timeout(void *data)
1271 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1273 tmf->u.tmf.comp_status = CS_TIMEOUT;
1274 complete(&tmf->u.tmf.comp);
/*
 * qla2x00_tmf_sp_done - TMF IOCB completion: wake the synchronous
 * waiter (comp_status was filled in by the ISR path, not visible
 * here).
 */
1278 qla2x00_tmf_sp_done(void *ptr, int res)
1281 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1283 complete(&tmf->u.tmf.comp);
/*
 * qla2x00_async_tm_cmd - issue a task-management function (e.g. LUN
 * reset) and wait synchronously for its completion.
 *
 * Builds an SRB_TM_CMD SRB with a completion, starts it, blocks on the
 * completion (woken by either the done callback or the timeout
 * handler), then on success issues a marker IOCB so the firmware
 * flushes commands ordered before the reset. Sleeps — process context
 * only.
 */
1287 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1290 struct scsi_qla_host *vha = fcport->vha;
1291 struct srb_iocb *tm_iocb;
1293 int rval = QLA_FUNCTION_FAILED;
1295 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1299 tm_iocb = &sp->u.iocb_cmd;
1300 sp->type = SRB_TM_CMD;
1302 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1303 tm_iocb->u.tmf.flags = flags;
1304 tm_iocb->u.tmf.lun = lun;
1305 tm_iocb->u.tmf.data = tag;
1306 sp->done = qla2x00_tmf_sp_done;
1307 tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
1308 init_completion(&tm_iocb->u.tmf.comp);
1310 rval = qla2x00_start_sp(sp);
1311 if (rval != QLA_SUCCESS)
1314 ql_dbg(ql_dbg_taskm, vha, 0x802f,
1315 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
1316 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
1317 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1319 wait_for_completion(&tm_iocb->u.tmf.comp);
1321 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
1322 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1324 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
1325 ql_dbg(ql_dbg_taskm, vha, 0x8030,
1326 "TM IOCB failed (%x).\n", rval);
1329 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
1330 flags = tm_iocb->u.tmf.flags;
1331 lun = (uint16_t)tm_iocb->u.tmf.lun;
1333 /* Issue Marker IOCB */
1334 qla2x00_marker(vha, vha->hw->req_q_map[0],
1335 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
1336 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
/*
 * qla24xx_abort_iocb_timeout - abort-IOCB timer expiry: record
 * CS_TIMEOUT and wake the waiter in qla24xx_async_abort_cmd().
 */
1346 qla24xx_abort_iocb_timeout(void *data)
1349 struct srb_iocb *abt = &sp->u.iocb_cmd;
1351 abt->u.abt.comp_status = CS_TIMEOUT;
1352 complete(&abt->u.abt.comp);
/*
 * qla24xx_abort_sp_done - abort-IOCB completion: wake the synchronous
 * waiter.
 */
1356 qla24xx_abort_sp_done(void *ptr, int res)
1359 struct srb_iocb *abt = &sp->u.iocb_cmd;
1361 complete(&abt->u.abt.comp);
/*
 * qla24xx_async_abort_cmd - issue an Abort IOCB targeting @cmd_sp's
 * firmware handle and wait synchronously for the abort to complete.
 * Returns QLA_SUCCESS only when the firmware reports CS_COMPLETE.
 * Sleeps — process context only.
 */
1365 qla24xx_async_abort_cmd(srb_t *cmd_sp)
1367 scsi_qla_host_t *vha = cmd_sp->vha;
1368 fc_port_t *fcport = cmd_sp->fcport;
1369 struct srb_iocb *abt_iocb;
1371 int rval = QLA_FUNCTION_FAILED;
1373 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1377 abt_iocb = &sp->u.iocb_cmd;
1378 sp->type = SRB_ABT_CMD;
1380 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1381 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
1382 sp->done = qla24xx_abort_sp_done;
1383 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
1384 init_completion(&abt_iocb->u.abt.comp);
1386 rval = qla2x00_start_sp(sp);
1387 if (rval != QLA_SUCCESS)
1390 ql_dbg(ql_dbg_async, vha, 0x507c,
1391 "Abort command issued - hdl=%x, target_id=%x\n",
1392 cmd_sp->handle, fcport->tgt_id);
1394 wait_for_completion(&abt_iocb->u.abt.comp);
1396 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
1397 QLA_SUCCESS : QLA_FUNCTION_FAILED;
/*
 * qla24xx_async_abort_command - abort @sp if it is still outstanding.
 *
 * Scans the request queue's outstanding_cmds[] under hardware_lock to
 * confirm the SRB is still in flight; if not found, it already
 * completed and QLA_FUNCTION_FAILED is returned. FX00 discovery
 * commands are aborted through the qlafx00 path, everything else via
 * the Abort IOCB.
 */
1406 qla24xx_async_abort_command(srb_t *sp)
1408 unsigned long flags = 0;
1411 fc_port_t *fcport = sp->fcport;
1412 struct scsi_qla_host *vha = fcport->vha;
1413 struct qla_hw_data *ha = vha->hw;
1414 struct req_que *req = vha->req;
/* Handle 0 is reserved; search starts at 1. */
1416 spin_lock_irqsave(&ha->hardware_lock, flags);
1417 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1418 if (req->outstanding_cmds[handle] == sp)
1421 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1422 if (handle == req->num_outstanding_cmds) {
1423 /* Command not found. */
1424 return QLA_FUNCTION_FAILED;
1426 if (sp->type == SRB_FXIOCB_DCMD)
1427 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1428 FXDISC_ABORT_IOCTL);
1430 return qla24xx_async_abort_cmd(sp);
/*
 * qla24xx_handle_prli_done_event - act on PRLI completion status.
 *
 * Success -> fetch the port database (GPDB). On failure for an N2N
 * (point-to-point) port that tried NVMe, retry PRLI as plain FCP by
 * clearing fc4f_nvme. Other statuses just log. (Case labels and
 * breaks for the failure branch are missing from this extract.)
 */
1434 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1436 switch (ea->data[0]) {
1437 case MBS_COMMAND_COMPLETE:
1438 ql_dbg(ql_dbg_disc, vha, 0x2118,
1439 "%s %d %8phC post gpdb\n",
1440 __func__, __LINE__, ea->fcport->port_name);
1442 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1443 ea->fcport->logout_on_delete = 1;
1444 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1447 if (ea->fcport->n2n_flag) {
1448 ql_dbg(ql_dbg_disc, vha, 0x2118,
1449 "%s %d %8phC post fc4 prli\n",
1450 __func__, __LINE__, ea->fcport->port_name);
1451 ea->fcport->fc4f_nvme = 0;
1452 ea->fcport->n2n_flag = 0;
1453 qla24xx_post_prli_work(vha, ea->fcport);
1455 ql_dbg(ql_dbg_disc, vha, 0x2119,
1456 "%s %d %8phC unhandle event of %x\n",
1457 __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
1463 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1465 port_id_t cid; /* conflict Nport id */
1467 struct fc_port *conflict_fcport;
1469 switch (ea->data[0]) {
1470 case MBS_COMMAND_COMPLETE:
1472 * Driver must validate login state - If PRLI not complete,
1473 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
1476 if (ea->fcport->fc4f_nvme) {
1477 ql_dbg(ql_dbg_disc, vha, 0x2117,
1478 "%s %d %8phC post prli\n",
1479 __func__, __LINE__, ea->fcport->port_name);
1480 qla24xx_post_prli_work(vha, ea->fcport);
1482 ql_dbg(ql_dbg_disc, vha, 0x20ea,
1483 "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
1484 __func__, __LINE__, ea->fcport->port_name,
1485 ea->fcport->loop_id, ea->fcport->d_id.b24);
1487 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1488 ea->fcport->loop_id = FC_NO_LOOP_ID;
1489 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1490 ea->fcport->logout_on_delete = 1;
1491 ea->fcport->send_els_logo = 0;
1492 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1495 case MBS_COMMAND_ERROR:
1496 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
1497 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
1499 ea->fcport->flags &= ~FCF_ASYNC_SENT;
1500 ea->fcport->disc_state = DSC_LOGIN_FAILED;
1501 if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
1502 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1504 qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
1506 case MBS_LOOP_ID_USED:
1507 /* data[1] = IO PARAM 1 = nport ID */
1508 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
1509 cid.b.area = (ea->iop[1] >> 8) & 0xff;
1510 cid.b.al_pa = ea->iop[1] & 0xff;
1513 ql_dbg(ql_dbg_disc, vha, 0x20ec,
1514 "%s %d %8phC LoopID 0x%x in use post gnl\n",
1515 __func__, __LINE__, ea->fcport->port_name,
1516 ea->fcport->loop_id);
1518 if (IS_SW_RESV_ADDR(cid)) {
1519 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1520 ea->fcport->loop_id = FC_NO_LOOP_ID;
1522 qla2x00_clear_loop_id(ea->fcport);
1524 qla24xx_post_gnl_work(vha, ea->fcport);
1526 case MBS_PORT_ID_USED:
1527 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1528 "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
1529 __func__, __LINE__, ea->fcport->port_name,
1530 ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
1531 ea->fcport->d_id.b.al_pa);
1533 lid = ea->iop[1] & 0xffff;
1534 qlt_find_sess_invalidate_other(vha,
1535 wwn_to_u64(ea->fcport->port_name),
1536 ea->fcport->d_id, lid, &conflict_fcport);
1538 if (conflict_fcport) {
1540 * Another fcport share the same loop_id/nport id.
1541 * Conflict fcport needs to finish cleanup before this
1542 * fcport can proceed to login.
1544 conflict_fcport->conflict = ea->fcport;
1545 ea->fcport->login_pause = 1;
1547 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1548 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
1549 __func__, __LINE__, ea->fcport->port_name,
1550 ea->fcport->d_id.b24, lid);
1551 qla2x00_clear_loop_id(ea->fcport);
1552 qla24xx_post_gidpn_work(vha, ea->fcport);
1554 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1555 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
1556 __func__, __LINE__, ea->fcport->port_name,
1557 ea->fcport->d_id.b24, lid);
1559 qla2x00_clear_loop_id(ea->fcport);
1560 set_bit(lid, vha->hw->loop_id_map);
1561 ea->fcport->loop_id = lid;
1562 ea->fcport->keep_nport_handle = 0;
1563 qlt_schedule_sess_for_deletion(ea->fcport, false);
1571 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1574 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1575 qlt_logo_completion_handler(fcport, data[0]);
1576 fcport->login_gen++;
1581 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1584 if (data[0] == MBS_COMMAND_COMPLETE) {
1585 qla2x00_update_fcport(vha, fcport);
1591 fcport->flags &= ~FCF_ASYNC_SENT;
1592 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
1593 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1595 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1600 /****************************************************************************/
1601 /* QLogic ISP2x00 Hardware Support Functions. */
1602 /****************************************************************************/
1605 qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
1607 int rval = QLA_SUCCESS;
1608 struct qla_hw_data *ha = vha->hw;
1609 uint32_t idc_major_ver, idc_minor_ver;
1612 qla83xx_idc_lock(vha, 0);
1614 /* SV: TODO: Assign initialization timeout from
1615 * flash-info / other param
1617 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
1618 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
1620 /* Set our fcoe function presence */
1621 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
1622 ql_dbg(ql_dbg_p3p, vha, 0xb077,
1623 "Error while setting DRV-Presence.\n");
1624 rval = QLA_FUNCTION_FAILED;
1628 /* Decide the reset ownership */
1629 qla83xx_reset_ownership(vha);
1632 * On first protocol driver load:
1633 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
1635 * Others: Check compatibility with current IDC Major version.
1637 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
1638 if (ha->flags.nic_core_reset_owner) {
1639 /* Set IDC Major version */
1640 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
1641 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
1643 /* Clearing IDC-Lock-Recovery register */
1644 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
1645 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
1647 * Clear further IDC participation if we are not compatible with
1648 * the current IDC Major Version.
1650 ql_log(ql_log_warn, vha, 0xb07d,
1651 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
1652 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
1653 __qla83xx_clear_drv_presence(vha);
1654 rval = QLA_FUNCTION_FAILED;
1657 /* Each function sets its supported Minor version. */
1658 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
1659 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
1660 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
1662 if (ha->flags.nic_core_reset_owner) {
1663 memset(config, 0, sizeof(config));
1664 if (!qla81xx_get_port_config(vha, config))
1665 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
1669 rval = qla83xx_idc_state_handler(vha);
1672 qla83xx_idc_unlock(vha, 0);
1678 * qla2x00_initialize_adapter
1682 * ha = adapter block pointer.
1688 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
1691 struct qla_hw_data *ha = vha->hw;
1692 struct req_que *req = ha->req_q_map[0];
1694 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
1695 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
1697 /* Clear adapter flags. */
1698 vha->flags.online = 0;
1699 ha->flags.chip_reset_done = 0;
1700 vha->flags.reset_active = 0;
1701 ha->flags.pci_channel_io_perm_failure = 0;
1702 ha->flags.eeh_busy = 0;
1703 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
1704 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1705 atomic_set(&vha->loop_state, LOOP_DOWN);
1706 vha->device_flags = DFLG_NO_CABLE;
1708 vha->flags.management_server_logged_in = 0;
1709 vha->marker_needed = 0;
1710 ha->isp_abort_cnt = 0;
1711 ha->beacon_blink_led = 0;
1713 set_bit(0, ha->req_qid_map);
1714 set_bit(0, ha->rsp_qid_map);
1716 ql_dbg(ql_dbg_init, vha, 0x0040,
1717 "Configuring PCI space...\n");
1718 rval = ha->isp_ops->pci_config(vha);
1720 ql_log(ql_log_warn, vha, 0x0044,
1721 "Unable to configure PCI space.\n");
1725 ha->isp_ops->reset_chip(vha);
1727 rval = qla2xxx_get_flash_info(vha);
1729 ql_log(ql_log_fatal, vha, 0x004f,
1730 "Unable to validate FLASH data.\n");
1734 if (IS_QLA8044(ha)) {
1735 qla8044_read_reset_template(vha);
1737 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
1738 * If DONRESET_BIT0 is set, drivers should not set dev_state
1739 * to NEED_RESET. But if NEED_RESET is set, drivers should
1740 * should honor the reset. */
1741 if (ql2xdontresethba == 1)
1742 qla8044_set_idc_dontreset(vha);
1745 ha->isp_ops->get_flash_version(vha, req->ring);
1746 ql_dbg(ql_dbg_init, vha, 0x0061,
1747 "Configure NVRAM parameters...\n");
1749 ha->isp_ops->nvram_config(vha);
1751 if (ha->flags.disable_serdes) {
1752 /* Mask HBA via NVRAM settings? */
1753 ql_log(ql_log_info, vha, 0x0077,
1754 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
1755 return QLA_FUNCTION_FAILED;
1758 ql_dbg(ql_dbg_init, vha, 0x0078,
1759 "Verifying loaded RISC code...\n");
1761 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
1762 rval = ha->isp_ops->chip_diag(vha);
1765 rval = qla2x00_setup_chip(vha);
1770 if (IS_QLA84XX(ha)) {
1771 ha->cs84xx = qla84xx_get_chip(vha);
1773 ql_log(ql_log_warn, vha, 0x00d0,
1774 "Unable to configure ISP84XX.\n");
1775 return QLA_FUNCTION_FAILED;
1779 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
1780 rval = qla2x00_init_rings(vha);
1782 ha->flags.chip_reset_done = 1;
1784 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
1785 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
1786 rval = qla84xx_init_chip(vha);
1787 if (rval != QLA_SUCCESS) {
1788 ql_log(ql_log_warn, vha, 0x00d4,
1789 "Unable to initialize ISP84XX.\n");
1790 qla84xx_put_chip(vha);
1794 /* Load the NIC Core f/w if we are the first protocol driver. */
1795 if (IS_QLA8031(ha)) {
1796 rval = qla83xx_nic_core_fw_load(vha);
1798 ql_log(ql_log_warn, vha, 0x0124,
1799 "Error in initializing NIC Core f/w.\n");
1802 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
1803 qla24xx_read_fcp_prio_cfg(vha);
1805 if (IS_P3P_TYPE(ha))
1806 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
1808 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
1814 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
1817 * Returns 0 on success.
1820 qla2100_pci_config(scsi_qla_host_t *vha)
1823 unsigned long flags;
1824 struct qla_hw_data *ha = vha->hw;
1825 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1827 pci_set_master(ha->pdev);
1828 pci_try_set_mwi(ha->pdev);
1830 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1831 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1832 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1834 pci_disable_rom(ha->pdev);
1836 /* Get PCI bus information. */
1837 spin_lock_irqsave(&ha->hardware_lock, flags);
1838 ha->pci_attr = RD_REG_WORD(®->ctrl_status);
1839 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1845 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
1848 * Returns 0 on success.
1851 qla2300_pci_config(scsi_qla_host_t *vha)
1854 unsigned long flags = 0;
1856 struct qla_hw_data *ha = vha->hw;
1857 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1859 pci_set_master(ha->pdev);
1860 pci_try_set_mwi(ha->pdev);
1862 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1863 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1865 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1866 w &= ~PCI_COMMAND_INTX_DISABLE;
1867 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1870 * If this is a 2300 card and not 2312, reset the
1871 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
1872 * the 2310 also reports itself as a 2300 so we need to get the
1873 * fb revision level -- a 6 indicates it really is a 2300 and
1876 if (IS_QLA2300(ha)) {
1877 spin_lock_irqsave(&ha->hardware_lock, flags);
1880 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC);
1881 for (cnt = 0; cnt < 30000; cnt++) {
1882 if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) != 0)
1888 /* Select FPM registers. */
1889 WRT_REG_WORD(®->ctrl_status, 0x20);
1890 RD_REG_WORD(®->ctrl_status);
1892 /* Get the fb rev level */
1893 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
1895 if (ha->fb_rev == FPM_2300)
1896 pci_clear_mwi(ha->pdev);
1898 /* Deselect FPM registers. */
1899 WRT_REG_WORD(®->ctrl_status, 0x0);
1900 RD_REG_WORD(®->ctrl_status);
1902 /* Release RISC module. */
1903 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
1904 for (cnt = 0; cnt < 30000; cnt++) {
1905 if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0)
1911 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1914 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1916 pci_disable_rom(ha->pdev);
1918 /* Get PCI bus information. */
1919 spin_lock_irqsave(&ha->hardware_lock, flags);
1920 ha->pci_attr = RD_REG_WORD(®->ctrl_status);
1921 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1927 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
1930 * Returns 0 on success.
1933 qla24xx_pci_config(scsi_qla_host_t *vha)
1936 unsigned long flags = 0;
1937 struct qla_hw_data *ha = vha->hw;
1938 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1940 pci_set_master(ha->pdev);
1941 pci_try_set_mwi(ha->pdev);
1943 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1944 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1945 w &= ~PCI_COMMAND_INTX_DISABLE;
1946 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1948 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
1950 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
1951 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
1952 pcix_set_mmrbc(ha->pdev, 2048);
1954 /* PCIe -- adjust Maximum Read Request Size (2048). */
1955 if (pci_is_pcie(ha->pdev))
1956 pcie_set_readrq(ha->pdev, 4096);
1958 pci_disable_rom(ha->pdev);
1960 ha->chip_revision = ha->pdev->revision;
1962 /* Get PCI bus information. */
1963 spin_lock_irqsave(&ha->hardware_lock, flags);
1964 ha->pci_attr = RD_REG_DWORD(®->ctrl_status);
1965 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1971 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
1974 * Returns 0 on success.
1977 qla25xx_pci_config(scsi_qla_host_t *vha)
1980 struct qla_hw_data *ha = vha->hw;
1982 pci_set_master(ha->pdev);
1983 pci_try_set_mwi(ha->pdev);
1985 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
1986 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1987 w &= ~PCI_COMMAND_INTX_DISABLE;
1988 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
1990 /* PCIe -- adjust Maximum Read Request Size (2048). */
1991 if (pci_is_pcie(ha->pdev))
1992 pcie_set_readrq(ha->pdev, 4096);
1994 pci_disable_rom(ha->pdev);
1996 ha->chip_revision = ha->pdev->revision;
2002 * qla2x00_isp_firmware() - Choose firmware image.
2005 * Returns 0 on success.
2008 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2011 uint16_t loop_id, topo, sw_cap;
2012 uint8_t domain, area, al_pa;
2013 struct qla_hw_data *ha = vha->hw;
2015 /* Assume loading risc code */
2016 rval = QLA_FUNCTION_FAILED;
2018 if (ha->flags.disable_risc_code_load) {
2019 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2021 /* Verify checksum of loaded RISC code. */
2022 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2023 if (rval == QLA_SUCCESS) {
2024 /* And, verify we are not in ROM code. */
2025 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2026 &area, &domain, &topo, &sw_cap);
2031 ql_dbg(ql_dbg_init, vha, 0x007a,
2032 "**** Load RISC code ****.\n");
2038 * qla2x00_reset_chip() - Reset ISP chip.
2041 * Returns 0 on success.
2044 qla2x00_reset_chip(scsi_qla_host_t *vha)
2046 unsigned long flags = 0;
2047 struct qla_hw_data *ha = vha->hw;
2048 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2052 if (unlikely(pci_channel_offline(ha->pdev)))
2055 ha->isp_ops->disable_intrs(ha);
2057 spin_lock_irqsave(&ha->hardware_lock, flags);
2059 /* Turn off master enable */
2061 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2062 cmd &= ~PCI_COMMAND_MASTER;
2063 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2065 if (!IS_QLA2100(ha)) {
2067 WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC);
2068 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2069 for (cnt = 0; cnt < 30000; cnt++) {
2070 if ((RD_REG_WORD(®->hccr) &
2071 HCCR_RISC_PAUSE) != 0)
2076 RD_REG_WORD(®->hccr); /* PCI Posting. */
2080 /* Select FPM registers. */
2081 WRT_REG_WORD(®->ctrl_status, 0x20);
2082 RD_REG_WORD(®->ctrl_status); /* PCI Posting. */
2084 /* FPM Soft Reset. */
2085 WRT_REG_WORD(®->fpm_diag_config, 0x100);
2086 RD_REG_WORD(®->fpm_diag_config); /* PCI Posting. */
2088 /* Toggle Fpm Reset. */
2089 if (!IS_QLA2200(ha)) {
2090 WRT_REG_WORD(®->fpm_diag_config, 0x0);
2091 RD_REG_WORD(®->fpm_diag_config); /* PCI Posting. */
2094 /* Select frame buffer registers. */
2095 WRT_REG_WORD(®->ctrl_status, 0x10);
2096 RD_REG_WORD(®->ctrl_status); /* PCI Posting. */
2098 /* Reset frame buffer FIFOs. */
2099 if (IS_QLA2200(ha)) {
2100 WRT_FB_CMD_REG(ha, reg, 0xa000);
2101 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
2103 WRT_FB_CMD_REG(ha, reg, 0x00fc);
2105 /* Read back fb_cmd until zero or 3 seconds max */
2106 for (cnt = 0; cnt < 3000; cnt++) {
2107 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2113 /* Select RISC module registers. */
2114 WRT_REG_WORD(®->ctrl_status, 0);
2115 RD_REG_WORD(®->ctrl_status); /* PCI Posting. */
2117 /* Reset RISC processor. */
2118 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
2119 RD_REG_WORD(®->hccr); /* PCI Posting. */
2121 /* Release RISC processor. */
2122 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
2123 RD_REG_WORD(®->hccr); /* PCI Posting. */
2126 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
2127 WRT_REG_WORD(®->hccr, HCCR_CLR_HOST_INT);
2129 /* Reset ISP chip. */
2130 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET);
2132 /* Wait for RISC to recover from reset. */
2133 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2135 * It is necessary to for a delay here since the card doesn't
2136 * respond to PCI reads during a reset. On some architectures
2137 * this will result in an MCA.
2140 for (cnt = 30000; cnt; cnt--) {
2141 if ((RD_REG_WORD(®->ctrl_status) &
2142 CSR_ISP_SOFT_RESET) == 0)
2149 /* Reset RISC processor. */
2150 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
2152 WRT_REG_WORD(®->semaphore, 0);
2154 /* Release RISC processor. */
2155 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
2156 RD_REG_WORD(®->hccr); /* PCI Posting. */
2158 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2159 for (cnt = 0; cnt < 30000; cnt++) {
2160 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
2168 /* Turn on master enable */
2169 cmd |= PCI_COMMAND_MASTER;
2170 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2172 /* Disable RISC pause on FPM parity error. */
2173 if (!IS_QLA2100(ha)) {
2174 WRT_REG_WORD(®->hccr, HCCR_DISABLE_PARITY_PAUSE);
2175 RD_REG_WORD(®->hccr); /* PCI Posting. */
2178 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2182 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2184 * Returns 0 on success.
2187 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2189 uint16_t mb[4] = {0x1010, 0, 1, 0};
2191 if (!IS_QLA81XX(vha->hw))
2194 return qla81xx_write_mpi_register(vha, mb);
2198 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2201 * Returns 0 on success.
2204 qla24xx_reset_risc(scsi_qla_host_t *vha)
2206 unsigned long flags = 0;
2207 struct qla_hw_data *ha = vha->hw;
2208 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2211 static int abts_cnt; /* ISP abort retry counts */
2212 int rval = QLA_SUCCESS;
2214 spin_lock_irqsave(&ha->hardware_lock, flags);
2217 WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2218 for (cnt = 0; cnt < 30000; cnt++) {
2219 if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
2225 if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE))
2226 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
2228 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
2229 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
2230 RD_REG_DWORD(®->hccr),
2231 RD_REG_DWORD(®->ctrl_status),
2232 (RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE));
2234 WRT_REG_DWORD(®->ctrl_status,
2235 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
2236 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
2240 /* Wait for firmware to complete NVRAM accesses. */
2241 RD_REG_WORD(®->mailbox0);
2242 for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 &&
2243 rval == QLA_SUCCESS; cnt--) {
2248 rval = QLA_FUNCTION_TIMEOUT;
2251 if (rval == QLA_SUCCESS)
2252 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
2254 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
2255 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
2256 RD_REG_DWORD(®->hccr),
2257 RD_REG_DWORD(®->mailbox0));
2259 /* Wait for soft-reset to complete. */
2260 RD_REG_DWORD(®->ctrl_status);
2261 for (cnt = 0; cnt < 60; cnt++) {
2263 if ((RD_REG_DWORD(®->ctrl_status) &
2264 CSRX_ISP_SOFT_RESET) == 0)
2269 if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET))
2270 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
2272 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
2273 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
2274 RD_REG_DWORD(®->hccr),
2275 RD_REG_DWORD(®->ctrl_status));
2277 /* If required, do an MPI FW reset now */
2278 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
2279 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
2280 if (++abts_cnt < 5) {
2281 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2282 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
2285 * We exhausted the ISP abort retries. We have to
2286 * set the board offline.
2289 vha->flags.online = 0;
2294 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET);
2295 RD_REG_DWORD(®->hccr);
2297 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE);
2298 RD_REG_DWORD(®->hccr);
2300 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET);
2301 RD_REG_DWORD(®->hccr);
2303 RD_REG_WORD(®->mailbox0);
2304 for (cnt = 60; RD_REG_WORD(®->mailbox0) != 0 &&
2305 rval == QLA_SUCCESS; cnt--) {
2310 rval = QLA_FUNCTION_TIMEOUT;
2312 if (rval == QLA_SUCCESS)
2313 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2315 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
2316 "Host Risc 0x%x, mailbox0 0x%x\n",
2317 RD_REG_DWORD(®->hccr),
2318 RD_REG_WORD(®->mailbox0));
2320 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2322 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
2323 "Driver in %s mode\n",
2324 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
2326 if (IS_NOPOLLING_TYPE(ha))
2327 ha->isp_ops->enable_intrs(ha);
2333 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2335 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2337 WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2338 *data = RD_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2343 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2345 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2347 WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2348 WRT_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2352 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
2355 uint delta_msec = 100;
2356 uint elapsed_msec = 0;
2360 if (vha->hw->pdev->subsystem_device != 0x0175 &&
2361 vha->hw->pdev->subsystem_device != 0x0240)
2364 WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
2368 timeout_msec = TIMEOUT_SEMAPHORE;
2369 n = timeout_msec / delta_msec;
2371 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
2372 qla25xx_read_risc_sema_reg(vha, &wd32);
2373 if (wd32 & RISC_SEMAPHORE)
2376 elapsed_msec += delta_msec;
2377 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2381 if (!(wd32 & RISC_SEMAPHORE))
2384 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2387 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
2388 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
2389 n = timeout_msec / delta_msec;
2391 qla25xx_read_risc_sema_reg(vha, &wd32);
2392 if (!(wd32 & RISC_SEMAPHORE_FORCE))
2395 elapsed_msec += delta_msec;
2396 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
2400 if (wd32 & RISC_SEMAPHORE_FORCE)
2401 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
2406 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
2413 * qla24xx_reset_chip() - Reset ISP24xx chip.
2416 * Returns 0 on success.
2419 qla24xx_reset_chip(scsi_qla_host_t *vha)
2421 struct qla_hw_data *ha = vha->hw;
2423 if (pci_channel_offline(ha->pdev) &&
2424 ha->flags.pci_channel_io_perm_failure) {
2428 ha->isp_ops->disable_intrs(ha);
2430 qla25xx_manipulate_risc_semaphore(vha);
2432 /* Perform RISC reset. */
2433 qla24xx_reset_risc(vha);
2437 * qla2x00_chip_diag() - Test chip for proper operation.
2440 * Returns 0 on success.
2443 qla2x00_chip_diag(scsi_qla_host_t *vha)
2446 struct qla_hw_data *ha = vha->hw;
2447 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2448 unsigned long flags = 0;
2452 struct req_que *req = ha->req_q_map[0];
2454 /* Assume a failed state */
2455 rval = QLA_FUNCTION_FAILED;
2457 ql_dbg(ql_dbg_init, vha, 0x007b,
2458 "Testing device at %lx.\n", (u_long)®->flash_address);
2460 spin_lock_irqsave(&ha->hardware_lock, flags);
2462 /* Reset ISP chip. */
2463 WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET);
2466 * We need to have a delay here since the card will not respond while
2467 * in reset causing an MCA on some architectures.
2470 data = qla2x00_debounce_register(®->ctrl_status);
2471 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
2473 data = RD_REG_WORD(®->ctrl_status);
2478 goto chip_diag_failed;
2480 ql_dbg(ql_dbg_init, vha, 0x007c,
2481 "Reset register cleared by chip reset.\n");
2483 /* Reset RISC processor. */
2484 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
2485 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
2487 /* Workaround for QLA2312 PCI parity error */
2488 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2489 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
2490 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
2492 data = RD_MAILBOX_REG(ha, reg, 0);
2499 goto chip_diag_failed;
2501 /* Check product ID of chip */
2502 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
2504 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
2505 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
2506 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
2507 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
2508 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
2509 mb[3] != PROD_ID_3) {
2510 ql_log(ql_log_warn, vha, 0x0062,
2511 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
2512 mb[1], mb[2], mb[3]);
2514 goto chip_diag_failed;
2516 ha->product_id[0] = mb[1];
2517 ha->product_id[1] = mb[2];
2518 ha->product_id[2] = mb[3];
2519 ha->product_id[3] = mb[4];
2521 /* Adjust fw RISC transfer size */
2522 if (req->length > 1024)
2523 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
2525 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
2528 if (IS_QLA2200(ha) &&
2529 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
2530 /* Limit firmware transfer size with a 2200A */
2531 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
2533 ha->device_type |= DT_ISP2200A;
2534 ha->fw_transfer_size = 128;
2537 /* Wrap Incoming Mailboxes Test. */
2538 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2540 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
2541 rval = qla2x00_mbx_reg_test(vha);
2543 ql_log(ql_log_warn, vha, 0x0080,
2544 "Failed mailbox send register test.\n");
2546 /* Flag a successful rval */
2548 spin_lock_irqsave(&ha->hardware_lock, flags);
2552 ql_log(ql_log_info, vha, 0x0081,
2553 "Chip diagnostics **** FAILED ****.\n");
2555 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2561 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
2564 * Returns 0 on success.
2567 qla24xx_chip_diag(scsi_qla_host_t *vha)
2570 struct qla_hw_data *ha = vha->hw;
2571 struct req_que *req = ha->req_q_map[0];
2573 if (IS_P3P_TYPE(ha))
2576 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
2578 rval = qla2x00_mbx_reg_test(vha);
2580 ql_log(ql_log_warn, vha, 0x0082,
2581 "Failed mailbox send register test.\n");
2583 /* Flag a successful rval */
2591 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
2594 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
2595 eft_size, fce_size, mq_size;
2598 struct qla_hw_data *ha = vha->hw;
2599 struct req_que *req = ha->req_q_map[0];
2600 struct rsp_que *rsp = ha->rsp_q_map[0];
2603 ql_dbg(ql_dbg_init, vha, 0x00bd,
2604 "Firmware dump already allocated.\n");
2609 ha->fw_dump_cap_flags = 0;
2610 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
2611 req_q_size = rsp_q_size = 0;
2616 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2617 fixed_size = sizeof(struct qla2100_fw_dump);
2618 } else if (IS_QLA23XX(ha)) {
2619 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
2620 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
2622 } else if (IS_FWI2_CAPABLE(ha)) {
2623 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
2624 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
2625 else if (IS_QLA81XX(ha))
2626 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
2627 else if (IS_QLA25XX(ha))
2628 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
2630 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
2632 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
2635 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
2636 mq_size = sizeof(struct qla2xxx_mq_chain);
2638 * Allocate maximum buffer size for all queues.
2639 * Resizing must be done at end-of-dump processing.
2641 mq_size += ha->max_req_queues *
2642 (req->length * sizeof(request_t));
2643 mq_size += ha->max_rsp_queues *
2644 (rsp->length * sizeof(response_t));
2646 if (ha->tgt.atio_ring)
2647 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
2648 /* Allocate memory for Fibre Channel Event Buffer. */
2649 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2655 dma_free_coherent(&ha->pdev->dev,
2656 FCE_SIZE, ha->fce, ha->fce_dma);
2658 /* Allocate memory for Fibre Channel Event Buffer. */
2659 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
2662 ql_log(ql_log_warn, vha, 0x00be,
2663 "Unable to allocate (%d KB) for FCE.\n",
2668 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
2669 ha->fce_mb, &ha->fce_bufs);
2671 ql_log(ql_log_warn, vha, 0x00bf,
2672 "Unable to initialize FCE (%d).\n", rval);
2673 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
2675 ha->flags.fce_enabled = 0;
2678 ql_dbg(ql_dbg_init, vha, 0x00c0,
2679 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
2681 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
2682 ha->flags.fce_enabled = 1;
2683 ha->fce_dma = tc_dma;
2688 dma_free_coherent(&ha->pdev->dev,
2689 EFT_SIZE, ha->eft, ha->eft_dma);
2691 /* Allocate memory for Extended Trace Buffer. */
2692 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
2695 ql_log(ql_log_warn, vha, 0x00c1,
2696 "Unable to allocate (%d KB) for EFT.\n",
2701 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
2703 ql_log(ql_log_warn, vha, 0x00c2,
2704 "Unable to initialize EFT (%d).\n", rval);
2705 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
2709 ql_dbg(ql_dbg_init, vha, 0x00c3,
2710 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
2712 eft_size = EFT_SIZE;
2713 ha->eft_dma = tc_dma;
2718 if (IS_QLA27XX(ha)) {
2719 if (!ha->fw_dump_template) {
2720 ql_log(ql_log_warn, vha, 0x00ba,
2721 "Failed missing fwdump template\n");
2724 dump_size = qla27xx_fwdt_calculate_dump_size(vha);
2725 ql_dbg(ql_dbg_init, vha, 0x00fa,
2726 "-> allocating fwdump (%x bytes)...\n", dump_size);
2730 req_q_size = req->length * sizeof(request_t);
2731 rsp_q_size = rsp->length * sizeof(response_t);
2732 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
2733 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
2734 ha->chain_offset = dump_size;
2735 dump_size += mq_size + fce_size;
2737 if (ha->exchoffld_buf)
2738 dump_size += sizeof(struct qla2xxx_offld_chain) +
2740 if (ha->exlogin_buf)
2741 dump_size += sizeof(struct qla2xxx_offld_chain) +
2745 ha->fw_dump = vmalloc(dump_size);
2747 ql_log(ql_log_warn, vha, 0x00c4,
2748 "Unable to allocate (%d KB) for firmware dump.\n",
2752 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2759 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
2766 ha->fw_dump_len = dump_size;
2767 ql_dbg(ql_dbg_init, vha, 0x00c5,
2768 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
2773 ha->fw_dump->signature[0] = 'Q';
2774 ha->fw_dump->signature[1] = 'L';
2775 ha->fw_dump->signature[2] = 'G';
2776 ha->fw_dump->signature[3] = 'C';
2777 ha->fw_dump->version = htonl(1);
2779 ha->fw_dump->fixed_size = htonl(fixed_size);
2780 ha->fw_dump->mem_size = htonl(mem_size);
2781 ha->fw_dump->req_q_size = htonl(req_q_size);
2782 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
2784 ha->fw_dump->eft_size = htonl(eft_size);
2785 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
2786 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
2788 ha->fw_dump->header_size =
2789 htonl(offsetof(struct qla2xxx_fw_dump, isp));
/*
 * qla81xx_mpi_sync - bring the adapter's MPS setting into sync with the
 * value held in PCI config space on ISP81xx parts.
 *
 * NOTE(review): this chunk is a lossy extraction (interior lines elided);
 * only the visible statements are documented below.
 */
2793 qla81xx_mpi_sync(scsi_qla_host_t *vha)
2795 #define MPS_MASK 0xe0
/* Only ISP81xx adapters need this MPS synchronization. */
2800 if (!IS_QLA81XX(vha->hw))
/* Acquire the firmware semaphore (RAM word 0x7c00 <- 1) before touching MPS. */
2803 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
2804 if (rval != QLA_SUCCESS) {
2805 ql_log(ql_log_warn, vha, 0x0105,
2806 "Unable to acquire semaphore.\n");
/* Read the reference value from PCI config offset 0x54 and the current
 * setting from RISC RAM word 0x7a15 for comparison. */
2810 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
2811 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
2812 if (rval != QLA_SUCCESS) {
2813 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
/* Already in sync within the MPS_MASK bits -- no write-back required. */
2818 if (dc == (dw & MPS_MASK))
/* Write the adjusted value back (the merge of dc into dw is elided here). */
2823 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
2824 if (rval != QLA_SUCCESS) {
2825 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
/* Always release the semaphore (RAM word 0x7c00 <- 0), even on failure. */
2829 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
2830 if (rval != QLA_SUCCESS) {
2831 ql_log(ql_log_warn, vha, 0x006d,
2832 "Unable to release semaphore.\n");
/*
 * qla2x00_alloc_outstanding_cmds - size and allocate the per-request-queue
 * outstanding-commands (srb_t *) array based on firmware resource counts.
 *
 * Returns QLA_FUNCTION_FAILED when even the minimal array cannot be
 * allocated (req->num_outstanding_cmds is reset to 0 in that case).
 * NOTE(review): interior lines are elided in this extraction.
 */
2840 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
2842 /* Don't try to reallocate the array */
2843 if (req->outstanding_cmds)
/* Pre-FWI2 adapters use a fixed default; FWI2 parts size the array from
 * the smaller of the firmware's exchange (xcb) and IOCB counts. */
2846 if (!IS_FWI2_CAPABLE(ha))
2847 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
2849 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
2850 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
2852 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
2855 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2856 req->num_outstanding_cmds, GFP_KERNEL);
2858 if (!req->outstanding_cmds) {
2860 * Try to allocate a minimal size just so we can get through
/* Fallback: retry with MIN_OUTSTANDING_COMMANDS before giving up. */
2863 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
2864 req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
2865 req->num_outstanding_cmds, GFP_KERNEL);
2867 if (!req->outstanding_cmds) {
2868 ql_log(ql_log_fatal, NULL, 0x0126,
2869 "Failed to allocate memory for "
2870 "outstanding_cmds for req_que %p.\n", req);
2871 req->num_outstanding_cmds = 0;
2872 return QLA_FUNCTION_FAILED;
2879 #define PRINT_FIELD(_field, _flag, _str) { \
2880 if (a0->_field & _flag) {\
2886 len = snprintf(ptr, leftover, "%s", _str); \
/*
 * qla2xxx_print_sfp_info - decode the cached SFP EEPROM page
 * (vha->hw->sfp_data, SFF 8247 A0 layout) and emit human-readable
 * vendor, media, link-length, link-tech and distance info to the
 * ql_dbg_init debug log.  Pure logging; no state is modified.
 *
 * NOTE(review): interior lines are elided in this extraction; PRINT_FIELD
 * appends a label to 'str' when the named bit is set in the A0 page.
 */
2893 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
2896 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
2897 u8 str[STR_LEN], *ptr, p;
/* Vendor name and part number, NUL-bounded into the local buffer. */
2900 memset(str, 0, STR_LEN);
2901 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
2902 ql_dbg(ql_dbg_init, vha, 0x015a,
2903 "SFP MFG Name: %s\n", str);
2905 memset(str, 0, STR_LEN);
2906 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
2907 ql_dbg(ql_dbg_init, vha, 0x015c,
2908 "SFP Part Name: %s\n", str);
/* Media type bits (code byte 9). */
2911 memset(str, 0, STR_LEN);
2915 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
2916 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
2917 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
2918 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
2919 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
2920 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
2921 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
2922 ql_dbg(ql_dbg_init, vha, 0x0160,
2923 "SFP Media: %s\n", str);
/* Link length class bits (code byte 7). */
2926 memset(str, 0, STR_LEN);
2930 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
2931 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
2932 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
2933 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
2934 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
2935 ql_dbg(ql_dbg_init, vha, 0x0196,
2936 "SFP Link Length: %s\n", str);
/* Transmitter technology bits (code bytes 7 and 8). */
2938 memset(str, 0, STR_LEN);
2942 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
2943 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
2944 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
2945 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
2946 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
2947 ql_dbg(ql_dbg_init, vha, 0x016e,
2948 "SFP FC Link Tech: %s\n", str);
/* Supported distances; EEPROM stores them in km, 100m, and 10m units. */
2951 ql_dbg(ql_dbg_init, vha, 0x016f,
2952 "SFP Distant: %d km\n", a0->length_km);
2953 if (a0->length_100m)
2954 ql_dbg(ql_dbg_init, vha, 0x0170,
2955 "SFP Distant: %d m\n", a0->length_100m*100);
2956 if (a0->length_50um_10m)
2957 ql_dbg(ql_dbg_init, vha, 0x0189,
2958 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
2959 if (a0->length_62um_10m)
2960 ql_dbg(ql_dbg_init, vha, 0x018a,
2961 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
2962 if (a0->length_om4_10m)
2963 ql_dbg(ql_dbg_init, vha, 0x0194,
2964 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
2965 if (a0->length_om3_10m)
2966 ql_dbg(ql_dbg_init, vha, 0x0195,
2967 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
2973 * QLA_SUCCESS: no action
2974 * QLA_INTERFACE_ERROR: SFP is not there.
2975 * QLA_FUNCTION_FAILED: detected New SFP
/*
 * qla24xx_detect_sfp - read the SFP EEPROM and classify the transceiver
 * as long-range or short-range, updating ha->flags.detected_lr_sfp and
 * ha->long_range_distance accordingly.
 *
 * Per the header comment above: QLA_SUCCESS = no action,
 * QLA_INTERFACE_ERROR = SFP absent, QLA_FUNCTION_FAILED = new SFP detected.
 * NOTE(review): interior lines are elided in this extraction.
 */
2978 qla24xx_detect_sfp(scsi_qla_host_t *vha)
2980 int rc = QLA_SUCCESS;
2981 struct sff_8247_a0 *a;
2982 struct qla_hw_data *ha = vha->hw;
/* Bail out early on adapters without auto SFP-detect support. */
2984 if (!AUTO_DETECT_SFP_SUPPORT(vha))
/* Refresh ha->sfp_data from the transceiver EEPROM. */
2987 rc = qla2x00_read_sfp_dev(vha, NULL, 0)
2991 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
2992 qla2xxx_print_sfp_info(vha);
/* Very-Long or Long link-length class bits mark a long-range SFP. */
2994 if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
2996 ha->flags.detected_lr_sfp = 1;
/* Pick the LR tuning distance from the EEPROM-reported reach. */
2998 if (a->length_km > 5 || a->length_100m > 50)
2999 ha->long_range_distance = LR_DISTANCE_10K;
3001 ha->long_range_distance = LR_DISTANCE_5K;
/* Log only when the detection disagrees with the current FW setting. */
3003 if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
3004 ql_dbg(ql_dbg_async, vha, 0x507b,
3005 "Detected Long Range SFP.\n");
3008 ha->flags.detected_lr_sfp = 0;
3009 if (ha->flags.using_lr_setting)
3010 ql_dbg(ql_dbg_async, vha, 0x5084,
3011 "Detected Short Range SFP.\n");
3014 if (!vha->flags.init_done)
3021 * qla2x00_setup_chip() - Load and start RISC firmware.
3024 * Returns 0 on success.
/*
 * qla2x00_setup_chip - load RISC firmware, verify its checksum, start it,
 * then pull firmware version/resource info and size driver structures
 * (outstanding-command array, fw dump, NPIV vport limit) from it.
 *
 * Returns QLA_SUCCESS on success (per the header comment above).
 * NOTE(review): interior lines are elided in this extraction, and '®'
 * below is a mojibake of '&reg' introduced by the extraction.
 */
3027 qla2x00_setup_chip(scsi_qla_host_t *vha)
3030 uint32_t srisc_address = 0;
3031 struct qla_hw_data *ha = vha->hw;
3032 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3033 unsigned long flags;
3034 uint16_t fw_major_version;
/* P3P (ISP82xx) parts load firmware via their own path. */
3036 if (IS_P3P_TYPE(ha)) {
3037 rval = ha->isp_ops->load_risc(vha, &srisc_address);
3038 if (rval == QLA_SUCCESS) {
3039 qla2x00_stop_firmware(vha);
3040 goto enable_82xx_npiv;
/* Legacy (pre-FWI2, non-2100/2200) chips: parity checking must be off
 * while the RISC is being loaded; re-enabled further below. */
3045 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3046 /* Disable SRAM, Instruction RAM and GP RAM parity. */
3047 spin_lock_irqsave(&ha->hardware_lock, flags);
3048 WRT_REG_WORD(®->hccr, (HCCR_ENABLE_PARITY + 0x0));
3049 RD_REG_WORD(®->hccr);
3050 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3053 qla81xx_mpi_sync(vha);
3055 /* Load firmware sequences */
3056 rval = ha->isp_ops->load_risc(vha, &srisc_address);
3057 if (rval == QLA_SUCCESS) {
3058 ql_dbg(ql_dbg_init, vha, 0x00c9,
3059 "Verifying Checksum of loaded RISC code.\n");
3061 rval = qla2x00_verify_checksum(vha, srisc_address);
3062 if (rval == QLA_SUCCESS) {
3063 /* Start firmware execution. */
3064 ql_dbg(ql_dbg_init, vha, 0x00ca,
3065 "Starting firmware.\n");
/* Enable extended login / exchange offload before exec when configured. */
3068 ha->flags.exlogins_enabled = 1;
3070 if (qla_is_exch_offld_enabled(vha))
3071 ha->flags.exchoffld_enabled = 1;
3073 rval = qla2x00_execute_fw(vha, srisc_address);
3074 /* Retrieve firmware information. */
3075 if (rval == QLA_SUCCESS) {
3076 qla24xx_detect_sfp(vha);
3078 rval = qla2x00_set_exlogins_buffer(vha);
3079 if (rval != QLA_SUCCESS)
3082 rval = qla2x00_set_exchoffld_buffer(vha);
3083 if (rval != QLA_SUCCESS)
/* Snapshot the pre-query major version: a zero value below means this
 * is the first start, gating the one-time fw-dump allocation. */
3087 fw_major_version = ha->fw_major_version;
3088 if (IS_P3P_TYPE(ha))
3089 qla82xx_check_md_needed(vha);
3091 rval = qla2x00_get_fw_version(vha);
3092 if (rval != QLA_SUCCESS)
/* NPIV support: midrange adapter with firmware attribute BIT_2. */
3094 ha->flags.npiv_supported = 0;
3095 if (IS_QLA2XXX_MIDTYPE(ha) &&
3096 (ha->fw_attributes & BIT_2)) {
3097 ha->flags.npiv_supported = 1;
/* Clamp a missing/unaligned vport count to MIN_MULTI_ID_FABRIC - 1. */
3098 if ((!ha->max_npiv_vports) ||
3099 ((ha->max_npiv_vports + 1) %
3100 MIN_MULTI_ID_FABRIC))
3101 ha->max_npiv_vports =
3102 MIN_MULTI_ID_FABRIC - 1;
3104 qla2x00_get_resource_cnts(vha);
3107 * Allocate the array of outstanding commands
3108 * now that we know the firmware resources.
3110 rval = qla2x00_alloc_outstanding_cmds(ha,
3112 if (rval != QLA_SUCCESS)
3115 if (!fw_major_version && ql2xallocfwdump
3116 && !(IS_P3P_TYPE(ha)))
3117 qla2x00_alloc_fw_dump(vha);
3122 ql_log(ql_log_fatal, vha, 0x00cd,
3123 "ISP Firmware failed checksum.\n");
/* Re-enable the parity protection disabled above on legacy chips. */
3129 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3130 /* Enable proper parity. */
3131 spin_lock_irqsave(&ha->hardware_lock, flags);
3134 WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x1);
3136 /* SRAM, Instruction RAM and GP RAM parity */
3137 WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x7);
3138 RD_REG_WORD(®->hccr);
3139 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Flash Access Control (FAC): probe the sector size; firmware that
 * cannot report it is treated as not FAC-capable. */
3143 ha->flags.fac_supported = 1;
3144 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
3147 rval = qla81xx_fac_get_sector_size(vha, &size);
3148 if (rval == QLA_SUCCESS) {
3149 ha->flags.fac_supported = 1;
3150 ha->fdt_block_size = size << 2;
3152 ql_log(ql_log_warn, vha, 0x00ce,
3153 "Unsupported FAC firmware (%d.%02d.%02d).\n",
3154 ha->fw_major_version, ha->fw_minor_version,
3155 ha->fw_subminor_version);
3157 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3158 ha->flags.fac_supported = 0;
3165 ql_log(ql_log_fatal, vha, 0x00cf,
3166 "Setup chip ****FAILED****.\n");
3173 * qla2x00_init_response_q_entries() - Initializes response queue entries.
3176 * Beginning of request ring has initialization control block already built
3177 * by nvram config routine.
3179 * Returns 0 on success.
/*
 * qla2x00_init_response_q_entries - reset a response queue to its start
 * and stamp every ring entry with RESPONSE_PROCESSED so the ISR can tell
 * stale entries from newly completed ones.
 * NOTE(review): the per-iteration pointer advance is elided in this
 * extraction.
 */
3182 qla2x00_init_response_q_entries(struct rsp_que *rsp)
3187 rsp->ring_ptr = rsp->ring;
3188 rsp->ring_index = 0;
3189 rsp->status_srb = NULL;
3190 pkt = rsp->ring_ptr;
3191 for (cnt = 0; cnt < rsp->length; cnt++) {
3192 pkt->signature = RESPONSE_PROCESSED;
3198 * qla2x00_update_fw_options() - Read and process firmware options.
3201 * Returns 0 on success.
/*
 * qla2x00_update_fw_options - read the current firmware options, fold in
 * NVRAM serial-link (swing/emphasis/sensitivity) settings for 1G/2G
 * transceivers plus driver policy bits, and write them back.
 *
 * ISP2100/2200 have no serial-link options and return early.
 * NOTE(review): interior lines are elided in this extraction.
 */
3204 qla2x00_update_fw_options(scsi_qla_host_t *vha)
3206 uint16_t swing, emphasis, tx_sens, rx_sens;
3207 struct qla_hw_data *ha = vha->hw;
3209 memset(ha->fw_options, 0, sizeof(ha->fw_options));
3210 qla2x00_get_fw_options(vha, ha->fw_options);
3212 if (IS_QLA2100(ha) || IS_QLA2200(ha))
3215 /* Serial Link options. */
3216 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
3217 "Serial link options.\n");
3218 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
3219 (uint8_t *)&ha->fw_seriallink_options,
3220 sizeof(ha->fw_seriallink_options));
/* Only program swing/emphasis when NVRAM byte 3 BIT_2 enables it. */
3222 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
3223 if (ha->fw_seriallink_options[3] & BIT_2) {
3224 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
/* 1G settings: unpack swing/emphasis/tx/rx-sensitivity bit-fields from
 * the NVRAM serial-link bytes into fw_options[10]. */
3227 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
3228 emphasis = (ha->fw_seriallink_options[2] &
3229 (BIT_4 | BIT_3)) >> 3;
3230 tx_sens = ha->fw_seriallink_options[0] &
3231 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3232 rx_sens = (ha->fw_seriallink_options[0] &
3233 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3234 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
3235 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3238 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
3239 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3240 ha->fw_options[10] |= BIT_5 |
3241 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3242 (tx_sens & (BIT_1 | BIT_0));
/* 2G settings: same unpacking from the other nibbles into fw_options[11]. */
3245 swing = (ha->fw_seriallink_options[2] &
3246 (BIT_7 | BIT_6 | BIT_5)) >> 5;
3247 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
3248 tx_sens = ha->fw_seriallink_options[1] &
3249 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3250 rx_sens = (ha->fw_seriallink_options[1] &
3251 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
3252 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
3253 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3256 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
3257 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
3258 ha->fw_options[11] |= BIT_5 |
3259 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
3260 (tx_sens & (BIT_1 | BIT_0));
3264 /* Return command IOCBs without waiting for an ABTS to complete. */
3265 ha->fw_options[3] |= BIT_13;
3268 if (ha->flags.enable_led_scheme)
3269 ha->fw_options[2] |= BIT_12;
3271 /* Detect ISP6312. */
3273 ha->fw_options[2] |= BIT_13;
3275 /* Set Retry FLOGI in case of P2P connection */
3276 if (ha->operating_mode == P2P) {
3277 ha->fw_options[2] |= BIT_3;
3278 ql_dbg(ql_dbg_disc, vha, 0x2100,
3279 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3280 __func__, ha->fw_options[2]);
3283 /* Update firmware options. */
3284 qla2x00_set_fw_options(vha, ha->fw_options);
/*
 * qla24xx_update_fw_options - FWI2 (24xx+) variant of the fw-options
 * update: set ABTS/FLOGI-retry/ATIO-routing/exchange-tracking policy
 * bits, push them to firmware, then program serdes parameters from the
 * 24xx NVRAM serial-link words when enabled.
 * NOTE(review): interior lines are elided in this extraction.
 */
3288 qla24xx_update_fw_options(scsi_qla_host_t *vha)
3291 struct qla_hw_data *ha = vha->hw;
/* ISP82xx (P3P) skips this path entirely. */
3293 if (IS_P3P_TYPE(ha))
3296 /* Hold status IOCBs until ABTS response received. */
3298 ha->fw_options[3] |= BIT_12;
3300 /* Set Retry FLOGI in case of P2P connection */
3301 if (ha->operating_mode == P2P) {
3302 ha->fw_options[2] |= BIT_3;
3303 ql_dbg(ql_dbg_disc, vha, 0x2101,
3304 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
3305 __func__, ha->fw_options[2]);
3308 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
/* Only when target/dual mode is active on 83xx/27xx and the module
 * parameter ql2xmvasynctoatio allows it. */
3309 if (ql2xmvasynctoatio &&
3310 (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
3311 if (qla_tgt_mode_enabled(vha) ||
3312 qla_dual_mode_enabled(vha))
3313 ha->fw_options[2] |= BIT_11;
3315 ha->fw_options[2] &= ~BIT_11;
3318 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3320 * Tell FW to track each exchange to prevent
3321 * driver from using stale exchange.
3323 if (qla_tgt_mode_enabled(vha) ||
3324 qla_dual_mode_enabled(vha))
3325 ha->fw_options[2] |= BIT_4;
3327 ha->fw_options[2] &= ~BIT_4;
3330 ql_dbg(ql_dbg_init, vha, 0x00e8,
3331 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
3332 __func__, ha->fw_options[1], ha->fw_options[2],
3333 ha->fw_options[3], vha->host->active_mode);
/* Only issue the mailbox command when at least one option word is set. */
3335 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
3336 qla2x00_set_fw_options(vha, ha->fw_options);
3338 /* Update Serial Link options. */
/* Word 0 BIT_0 gates whether serdes programming is enabled in NVRAM. */
3339 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
3342 rval = qla2x00_set_serdes_params(vha,
3343 le16_to_cpu(ha->fw_seriallink_options24[1]),
3344 le16_to_cpu(ha->fw_seriallink_options24[2]),
3345 le16_to_cpu(ha->fw_seriallink_options24[3]));
3346 if (rval != QLA_SUCCESS) {
3347 ql_log(ql_log_warn, vha, 0x0104,
3348 "Unable to update Serial Link options (%x).\n", rval);
/*
 * qla2x00_config_rings - program the base request/response ring geometry
 * and DMA addresses into the init control block, then zero the ring
 * in/out pointer registers (legacy, non-FWI2 register layout).
 */
3353 qla2x00_config_rings(struct scsi_qla_host *vha)
3355 struct qla_hw_data *ha = vha->hw;
3356 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3357 struct req_que *req = ha->req_q_map[0];
3358 struct rsp_que *rsp = ha->rsp_q_map[0];
3360 /* Setup ring parameters in initialization control block. */
3361 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
3362 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
3363 ha->init_cb->request_q_length = cpu_to_le16(req->length);
3364 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
/* 64-bit DMA addresses are split into low/high 32-bit halves. */
3365 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3366 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3367 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3368 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
/* Reset hardware ring indices; trailing read flushes PCI posting. */
3370 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
3371 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
3372 WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
3373 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
3374 RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
/*
 * qla24xx_config_rings - FWI2 variant of ring configuration: program the
 * 24xx init control block with request/response/ATIO ring geometry and
 * DMA addresses, set MQ/MSI-X related firmware_options_2 bits, and zero
 * the ring pointer registers.
 * NOTE(review): interior lines are elided in this extraction; '®' is a
 * mojibake of '&reg' introduced by the extraction.
 */
3378 qla24xx_config_rings(struct scsi_qla_host *vha)
3380 struct qla_hw_data *ha = vha->hw;
3381 device_reg_t *reg = ISP_QUE_REG(ha, 0);
3382 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
3383 struct qla_msix_entry *msix;
3384 struct init_cb_24xx *icb;
3386 struct req_que *req = ha->req_q_map[0];
3387 struct rsp_que *rsp = ha->rsp_q_map[0];
3389 /* Setup ring parameters in initialization control block. */
3390 icb = (struct init_cb_24xx *)ha->init_cb;
3391 icb->request_q_outpointer = cpu_to_le16(0);
3392 icb->response_q_inpointer = cpu_to_le16(0);
3393 icb->request_q_length = cpu_to_le16(req->length);
3394 icb->response_q_length = cpu_to_le16(rsp->length);
/* 64-bit DMA addresses split into low/high 32-bit halves. */
3395 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
3396 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
3397 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
3398 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
3400 /* Setup ATIO queue dma pointers for target mode */
3401 icb->atio_q_inpointer = cpu_to_le16(0);
3402 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
3403 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
3404 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
/* BIT_30|BIT_29: enable shadow-register in/out pointer reporting. */
3406 if (IS_SHADOW_REG_CAPABLE(ha))
3407 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
/* Multi-queue path (MQ enabled or 83xx/27xx). */
3409 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3410 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
3411 icb->rid = cpu_to_le16(rid);
3412 if (ha->flags.msix_enabled) {
3413 msix = &ha->msix_entries[1];
3414 ql_dbg(ql_dbg_init, vha, 0x0019,
3415 "Registering vector 0x%x for base que.\n",
3417 icb->msix = cpu_to_le16(msix->entry);
3419 /* Use alternate PCI bus number */
3421 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
3422 /* Use alternate PCI devfn */
3424 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
3426 /* Use Disable MSIX Handshake mode for capable adapters */
3427 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
3428 (ha->flags.msix_enabled)) {
3429 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
3430 ha->flags.disable_msix_handshake = 1;
3431 ql_dbg(ql_dbg_init, vha, 0x00fe,
3432 "MSIX Handshake Disable Mode turned on.\n");
3434 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
3436 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
/* MQ register layout (isp25mq) vs. base layout (isp24) below. */
3438 WRT_REG_DWORD(®->isp25mq.req_q_in, 0);
3439 WRT_REG_DWORD(®->isp25mq.req_q_out, 0);
3440 WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0);
3441 WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0);
3443 WRT_REG_DWORD(®->isp24.req_q_in, 0);
3444 WRT_REG_DWORD(®->isp24.req_q_out, 0);
3445 WRT_REG_DWORD(®->isp24.rsp_q_in, 0);
3446 WRT_REG_DWORD(®->isp24.rsp_q_out, 0);
/* Let target mode finish its ring setup, then flush PCI posting. */
3448 qlt_24xx_config_rings(vha);
3451 RD_REG_DWORD(&ioreg->hccr);
3455 * qla2x00_init_rings() - Initializes firmware.
3458 * Beginning of request ring has initialization control block already built
3459 * by nvram config routine.
3461 * Returns 0 on success.
/*
 * qla2x00_init_rings - reset every request/response/ATIO ring to its
 * initial state under the hardware lock, program the rings via the
 * isp_ops config_rings hook, then issue the init-firmware mailbox
 * command with the prepared init control block.
 *
 * Returns 0 on success (per the header comment above).
 * NOTE(review): interior lines are elided in this extraction.
 */
3464 qla2x00_init_rings(scsi_qla_host_t *vha)
3467 unsigned long flags = 0;
3469 struct qla_hw_data *ha = vha->hw;
3470 struct req_que *req;
3471 struct rsp_que *rsp;
3472 struct mid_init_cb_24xx *mid_init_cb =
3473 (struct mid_init_cb_24xx *) ha->init_cb;
3475 spin_lock_irqsave(&ha->hardware_lock, flags);
3477 /* Clear outstanding commands array. */
3478 for (que = 0; que < ha->max_req_queues; que++) {
3479 req = ha->req_q_map[que];
/* Skip unallocated or inactive queues. */
3480 if (!req || !test_bit(que, ha->req_qid_map))
/* out_ptr shadow lives just past the last ring entry. */
3482 req->out_ptr = (void *)(req->ring + req->length);
/* Slot 0 is reserved; outstanding handles start at index 1. */
3484 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
3485 req->outstanding_cmds[cnt] = NULL;
3487 req->current_outstanding_cmd = 1;
3489 /* Initialize firmware. */
3490 req->ring_ptr = req->ring;
3491 req->ring_index = 0;
3492 req->cnt = req->length;
3495 for (que = 0; que < ha->max_rsp_queues; que++) {
3496 rsp = ha->rsp_q_map[que];
3497 if (!rsp || !test_bit(que, ha->rsp_qid_map))
3499 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
3501 /* Initialize response queue entries */
3503 qlafx00_init_response_q_entries(rsp);
3505 qla2x00_init_response_q_entries(rsp);
3508 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
3509 ha->tgt.atio_ring_index = 0;
3510 /* Initialize ATIO queue entries */
3511 qlt_init_atio_q_entries(vha);
3513 ha->isp_ops->config_rings(vha);
3515 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3517 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
/* ISPFx00 uses its own init-firmware mailbox path. */
3519 if (IS_QLAFX00(ha)) {
3520 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
3524 /* Update any ISP specific firmware options before initialization. */
3525 ha->isp_ops->update_fw_options(vha);
3527 if (ha->flags.npiv_supported) {
/* Loop mode (non-CNA) cannot honor the configured vport count. */
3528 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
3529 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
3530 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
3533 if (IS_FWI2_CAPABLE(ha)) {
3534 mid_init_cb->options = cpu_to_le16(BIT_1);
3535 mid_init_cb->init_cb.execution_throttle =
3536 cpu_to_le16(ha->cur_fw_xcb_count);
/* D-Port and FA-WWPN support are advertised via firmware_options_1. */
3537 ha->flags.dport_enabled =
3538 (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
3539 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
3540 (ha->flags.dport_enabled) ? "enabled" : "disabled");
3541 /* FA-WWPN Status */
3542 ha->flags.fawwpn_enabled =
3543 (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
3544 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
3545 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
3548 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
3551 ql_log(ql_log_fatal, vha, 0x00d2,
3552 "Init Firmware **** FAILED ****.\n");
3554 ql_dbg(ql_dbg_init, vha, 0x00d3,
3555 "Init Firmware -- success.\n");
3563 * qla2x00_fw_ready() - Waits for firmware ready.
3566 * Returns 0 on success.
/*
 * qla2x00_fw_ready - poll the firmware state until it reaches
 * FSTATE_READY or the computed timeouts expire.  Uses two deadlines:
 * mtime (minimum wait while the loop is down / mailbox fails) and wtime
 * (overall wait for firmware-ready).
 *
 * Returns 0 on success (per the header comment above).
 * NOTE(review): interior lines are elided in this extraction.
 */
3569 qla2x00_fw_ready(scsi_qla_host_t *vha)
3572 unsigned long wtime, mtime, cs84xx_time;
3573 uint16_t min_wait; /* Minimum wait time if loop is down */
3574 uint16_t wait_time; /* Wait time if loop is coming ready */
3576 struct qla_hw_data *ha = vha->hw;
/* ISPFx00 has its own ready-wait implementation. */
3578 if (IS_QLAFX00(vha->hw))
3579 return qlafx00_fw_ready(vha);
3583 /* Time to wait for loop down */
3584 if (IS_P3P_TYPE(ha))
3590 * Firmware should take at most one RATOV to login, plus 5 seconds for
3591 * our own processing.
/* Never wait less than the loop-down minimum. */
3593 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
3594 wait_time = min_wait;
3597 /* Min wait time if loop down */
3598 mtime = jiffies + (min_wait * HZ);
3600 /* wait time before firmware ready */
3601 wtime = jiffies + (wait_time * HZ);
3603 /* Wait for ISP to finish LIP */
3604 if (!vha->flags.init_done)
3605 ql_log(ql_log_info, vha, 0x801e,
3606 "Waiting for LIP to complete.\n");
/* Seed state[] with -1 so stale words are recognizable in logs. */
3609 memset(state, -1, sizeof(state));
3610 rval = qla2x00_get_firmware_state(vha, state);
3611 if (rval == QLA_SUCCESS) {
3612 if (state[0] < FSTATE_LOSS_OF_SYNC) {
3613 vha->device_flags &= ~DFLG_NO_CABLE;
/* ISP84xx may need a verify-IOCB handshake before becoming ready. */
3615 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
3616 ql_dbg(ql_dbg_taskm, vha, 0x801f,
3617 "fw_state=%x 84xx=%x.\n", state[0],
3619 if ((state[2] & FSTATE_LOGGED_IN) &&
3620 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
3621 ql_dbg(ql_dbg_taskm, vha, 0x8028,
3622 "Sending verify iocb.\n");
3624 cs84xx_time = jiffies;
3625 rval = qla84xx_init_chip(vha);
3626 if (rval != QLA_SUCCESS) {
3629 "Init chip failed.\n");
3633 /* Add time taken to initialize. */
/* Extend both deadlines by the time the 84xx init consumed. */
3634 cs84xx_time = jiffies - cs84xx_time;
3635 wtime += cs84xx_time;
3636 mtime += cs84xx_time;
3637 ql_dbg(ql_dbg_taskm, vha, 0x8008,
3638 "Increasing wait time by %ld. "
3639 "New time %ld.\n", cs84xx_time,
3642 } else if (state[0] == FSTATE_READY) {
3643 ql_dbg(ql_dbg_taskm, vha, 0x8037,
3644 "F/W Ready - OK.\n");
/* Firmware is up: refresh retry count / login timeout / R_A_TOV. */
3646 qla2x00_get_retry_cnt(vha, &ha->retry_count,
3647 &ha->login_timeout, &ha->r_a_tov);
3653 rval = QLA_FUNCTION_FAILED;
3655 if (atomic_read(&vha->loop_down_timer) &&
3656 state[0] != FSTATE_READY) {
3657 /* Loop down. Timeout on min_wait for states
3658 * other than Wait for Login.
3660 if (time_after_eq(jiffies, mtime)) {
3661 ql_log(ql_log_info, vha, 0x8038,
3662 "Cable is unplugged...\n");
3664 vha->device_flags |= DFLG_NO_CABLE;
3669 /* Mailbox cmd failed. Timeout on min_wait. */
3670 if (time_after_eq(jiffies, mtime) ||
3671 ha->flags.isp82xx_fw_hung)
/* Overall firmware-ready deadline. */
3675 if (time_after_eq(jiffies, wtime))
3678 /* Delay for a while */
3682 ql_dbg(ql_dbg_taskm, vha, 0x803a,
3683 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
3684 state[1], state[2], state[3], state[4], state[5], jiffies);
3686 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
3687 ql_log(ql_log_warn, vha, 0x803b,
3688 "Firmware ready **** FAILED ****.\n");
3695 * qla2x00_configure_hba
3696 * Setup adapter context.
3699 * ha = adapter state pointer.
/*
 * qla2x00_configure_hba - query the adapter's loop ID, AL_PA, area,
 * domain and topology from firmware, record the operating mode and
 * topology in qla_hw_data, and publish the host port ID.
 *
 * Returns QLA_FUNCTION_FAILED when the adapter ID cannot be obtained
 * (caller retries); otherwise the elided tail presumably returns
 * success -- TODO confirm against full source.
 * NOTE(review): interior lines are elided in this extraction.
 */
3708 qla2x00_configure_hba(scsi_qla_host_t *vha)
3717 char connect_type[22];
3718 struct qla_hw_data *ha = vha->hw;
3719 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3722 /* Get host addresses. */
3723 rval = qla2x00_get_adapter_id(vha,
3724 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
3725 if (rval != QLA_SUCCESS) {
/* Transitional loop states are expected; log at debug level only. */
3726 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
3727 IS_CNA_CAPABLE(ha) ||
3728 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
3729 ql_dbg(ql_dbg_disc, vha, 0x2008,
3730 "Loop is in a transition state.\n");
3732 ql_log(ql_log_warn, vha, 0x2009,
3733 "Unable to get host loop ID.\n");
/* loop_id 0x1b on the base FWI2 port: attempt a link re-init before
 * scheduling a full ISP abort. */
3734 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
3735 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
3736 ql_log(ql_log_warn, vha, 0x1151,
3737 "Doing link init.\n");
3738 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
3741 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3747 ql_log(ql_log_info, vha, 0x200a,
3748 "Cannot get topology - retrying.\n");
3749 return (QLA_FUNCTION_FAILED);
3752 vha->loop_id = loop_id;
/* Defaults before the topology switch below. */
3755 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
3756 ha->operating_mode = LOOP;
/* Map the reported topology to driver mode and a printable label. */
3761 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
3762 ha->current_topology = ISP_CFG_NL;
3763 strcpy(connect_type, "(Loop)");
3767 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
3768 ha->switch_cap = sw_cap;
3769 ha->current_topology = ISP_CFG_FL;
3770 strcpy(connect_type, "(FL_Port)");
3774 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
3775 ha->operating_mode = P2P;
3776 ha->current_topology = ISP_CFG_N;
3777 strcpy(connect_type, "(N_Port-to-N_Port)");
3781 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
3782 ha->switch_cap = sw_cap;
3783 ha->operating_mode = P2P;
3784 ha->current_topology = ISP_CFG_F;
3785 strcpy(connect_type, "(F_Port)");
3789 ql_dbg(ql_dbg_disc, vha, 0x200f,
3790 "HBA in unknown topology %x, using NL.\n", topo);
3791 ha->current_topology = ISP_CFG_NL;
3792 strcpy(connect_type, "(Loop)");
3796 /* Save Host port and loop ID. */
3797 /* byte order - Big Endian */
3798 id.b.domain = domain;
/* Publish the (possibly changed) port ID to the target-mode host map. */
3802 qlt_update_host_map(vha, id);
3804 if (!vha->flags.init_done)
3805 ql_log(ql_log_info, vha, 0x2010,
3806 "Topology - %s, Host Loop address 0x%x.\n",
3807 connect_type, vha->loop_id);
/*
 * qla2x00_set_model_info - fill ha->model_number / ha->model_desc from
 * the supplied NVRAM model string, the static model-name table (older
 * adapters), or the 'def' fallback; FWI2 parts also pull the
 * description from the VPD "\x82" field.
 * NOTE(review): interior lines are elided in this extraction.
 */
3813 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
3818 struct qla_hw_data *ha = vha->hw;
/* The static name table only applies to older, non-24xx/25xx/CNA/2031
 * adapters. */
3819 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
3820 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
/* Non-blank NVRAM model string: copy it and trim trailing space/NUL. */
3822 if (memcmp(model, BINZERO, len) != 0) {
3823 strncpy(ha->model_number, model, len);
3824 st = en = ha->model_number;
3827 if (*en != 0x20 && *en != 0x00)
/* Table lookup is keyed by the low byte of the PCI subsystem device ID. */
3832 index = (ha->pdev->subsystem_device & 0xff);
3834 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
3835 index < QLA_MODEL_NAMES)
3836 strncpy(ha->model_desc,
3837 qla2x00_model_name[index * 2 + 1],
3838 sizeof(ha->model_desc) - 1);
/* Blank NVRAM model: take number and description from the table, or
 * fall back to the caller-supplied default. */
3840 index = (ha->pdev->subsystem_device & 0xff);
3842 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
3843 index < QLA_MODEL_NAMES) {
3844 strcpy(ha->model_number,
3845 qla2x00_model_name[index * 2]);
3846 strncpy(ha->model_desc,
3847 qla2x00_model_name[index * 2 + 1],
3848 sizeof(ha->model_desc) - 1);
3850 strcpy(ha->model_number, def);
/* FWI2: prefer the VPD product-description field when available. */
3853 if (IS_FWI2_CAPABLE(ha))
3854 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
3855 sizeof(ha->model_desc));
3858 /* On sparc systems, obtain port and node WWN from firmware
/*
 * qla2xxx_nvram_wwn_from_ofw - on OpenFirmware platforms, override the
 * NVRAM port/node WWNs with the "port-wwn"/"node-wwn" properties from
 * the PCI device's OF node, when those properties exist and are at
 * least WWN_SIZE bytes.
 */
3861 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
3864 struct qla_hw_data *ha = vha->hw;
3865 struct pci_dev *pdev = ha->pdev;
3866 struct device_node *dp = pci_device_to_OF_node(pdev);
3870 val = of_get_property(dp, "port-wwn", &len);
3871 if (val && len >= WWN_SIZE)
3872 memcpy(nv->port_name, val, WWN_SIZE);
3874 val = of_get_property(dp, "node-wwn", &len);
3875 if (val && len >= WWN_SIZE)
3876 memcpy(nv->node_name, val, WWN_SIZE);
3881 * NVRAM configuration for ISP 2xxx
3884 * ha = adapter block pointer.
3887 * initialization control block in response_ring
3888 * host adapters parameters in host adapter block
3894 qla2x00_nvram_config(scsi_qla_host_t *vha)
3899 uint8_t *dptr1, *dptr2;
3900 struct qla_hw_data *ha = vha->hw;
3901 init_cb_t *icb = ha->init_cb;
3902 nvram_t *nv = ha->nvram;
3903 uint8_t *ptr = ha->nvram;
3904 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3908 /* Determine NVRAM starting address. */
3909 ha->nvram_size = sizeof(nvram_t);
3911 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
3912 if ((RD_REG_WORD(®->ctrl_status) >> 14) == 1)
3913 ha->nvram_base = 0x80;
3915 /* Get NVRAM data and calculate checksum. */
3916 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
3917 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
3920 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
3921 "Contents of NVRAM.\n");
3922 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
3923 (uint8_t *)nv, ha->nvram_size);
3925 /* Bad NVRAM data, set defaults parameters. */
3926 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
3927 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
3928 /* Reset NVRAM data. */
3929 ql_log(ql_log_warn, vha, 0x0064,
3930 "Inconsistent NVRAM "
3931 "detected: checksum=0x%x id=%c version=0x%x.\n",
3932 chksum, nv->id[0], nv->nvram_version);
3933 ql_log(ql_log_warn, vha, 0x0065,
3935 "functioning (yet invalid -- WWPN) defaults.\n");
3938 * Set default initialization control block.
3940 memset(nv, 0, ha->nvram_size);
3941 nv->parameter_block_version = ICB_VERSION;
3943 if (IS_QLA23XX(ha)) {
3944 nv->firmware_options[0] = BIT_2 | BIT_1;
3945 nv->firmware_options[1] = BIT_7 | BIT_5;
3946 nv->add_firmware_options[0] = BIT_5;
3947 nv->add_firmware_options[1] = BIT_5 | BIT_4;
3948 nv->frame_payload_size = 2048;
3949 nv->special_options[1] = BIT_7;
3950 } else if (IS_QLA2200(ha)) {
3951 nv->firmware_options[0] = BIT_2 | BIT_1;
3952 nv->firmware_options[1] = BIT_7 | BIT_5;
3953 nv->add_firmware_options[0] = BIT_5;
3954 nv->add_firmware_options[1] = BIT_5 | BIT_4;
3955 nv->frame_payload_size = 1024;
3956 } else if (IS_QLA2100(ha)) {
3957 nv->firmware_options[0] = BIT_3 | BIT_1;
3958 nv->firmware_options[1] = BIT_5;
3959 nv->frame_payload_size = 1024;
3962 nv->max_iocb_allocation = cpu_to_le16(256);
3963 nv->execution_throttle = cpu_to_le16(16);
3964 nv->retry_count = 8;
3965 nv->retry_delay = 1;
3967 nv->port_name[0] = 33;
3968 nv->port_name[3] = 224;
3969 nv->port_name[4] = 139;
3971 qla2xxx_nvram_wwn_from_ofw(vha, nv);
3973 nv->login_timeout = 4;
3976 * Set default host adapter parameters
3978 nv->host_p[1] = BIT_2;
3979 nv->reset_delay = 5;
3980 nv->port_down_retry_count = 8;
3981 nv->max_luns_per_target = cpu_to_le16(8);
3982 nv->link_down_timeout = 60;
3987 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
3989 * The SN2 does not provide BIOS emulation which means you can't change
3990 * potentially bogus BIOS settings. Force the use of default settings
3991 * for link rate and frame size. Hope that the rest of the settings
3994 if (ia64_platform_is("sn2")) {
3995 nv->frame_payload_size = 2048;
3997 nv->special_options[1] = BIT_7;
4001 /* Reset Initialization control block */
4002 memset(icb, 0, ha->init_cb_size);
4005 * Setup driver NVRAM options.
4007 nv->firmware_options[0] |= (BIT_6 | BIT_1);
4008 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
4009 nv->firmware_options[1] |= (BIT_5 | BIT_0);
4010 nv->firmware_options[1] &= ~BIT_4;
4012 if (IS_QLA23XX(ha)) {
4013 nv->firmware_options[0] |= BIT_2;
4014 nv->firmware_options[0] &= ~BIT_3;
4015 nv->special_options[0] &= ~BIT_6;
4016 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
4018 if (IS_QLA2300(ha)) {
4019 if (ha->fb_rev == FPM_2310) {
4020 strcpy(ha->model_number, "QLA2310");
4022 strcpy(ha->model_number, "QLA2300");
4025 qla2x00_set_model_info(vha, nv->model_number,
4026 sizeof(nv->model_number), "QLA23xx");
4028 } else if (IS_QLA2200(ha)) {
4029 nv->firmware_options[0] |= BIT_2;
4031 * 'Point-to-point preferred, else loop' is not a safe
4032 * connection mode setting.
4034 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
4036 /* Force 'loop preferred, else point-to-point'. */
4037 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
4038 nv->add_firmware_options[0] |= BIT_5;
4040 strcpy(ha->model_number, "QLA22xx");
4041 } else /*if (IS_QLA2100(ha))*/ {
4042 strcpy(ha->model_number, "QLA2100");
4046 * Copy over NVRAM RISC parameter block to initialization control block.
4048 dptr1 = (uint8_t *)icb;
4049 dptr2 = (uint8_t *)&nv->parameter_block_version;
4050 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
4052 *dptr1++ = *dptr2++;
4054 /* Copy 2nd half. */
4055 dptr1 = (uint8_t *)icb->add_firmware_options;
4056 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
4058 *dptr1++ = *dptr2++;
4060 /* Use alternate WWN? */
4061 if (nv->host_p[1] & BIT_7) {
4062 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4063 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4066 /* Prepare nodename */
4067 if ((icb->firmware_options[1] & BIT_6) == 0) {
4069 * Firmware will apply the following mask if the nodename was
4072 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4073 icb->node_name[0] &= 0xF0;
4077 * Set host adapter parameters.
4081 * BIT_7 in the host-parameters section allows for modification to
4082 * internal driver logging.
4084 if (nv->host_p[0] & BIT_7)
4085 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
4086 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
4087 /* Always load RISC code on non ISP2[12]00 chips. */
4088 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
4089 ha->flags.disable_risc_code_load = 0;
4090 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
4091 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
4092 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
4093 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
4094 ha->flags.disable_serdes = 0;
4096 ha->operating_mode =
4097 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
4099 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
4100 sizeof(ha->fw_seriallink_options));
4102 /* save HBA serial number */
4103 ha->serial0 = icb->port_name[5];
4104 ha->serial1 = icb->port_name[6];
4105 ha->serial2 = icb->port_name[7];
4106 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4107 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4109 icb->execution_throttle = cpu_to_le16(0xFFFF);
4111 ha->retry_count = nv->retry_count;
4113 /* Set minimum login_timeout to 4 seconds. */
4114 if (nv->login_timeout != ql2xlogintimeout)
4115 nv->login_timeout = ql2xlogintimeout;
4116 if (nv->login_timeout < 4)
4117 nv->login_timeout = 4;
4118 ha->login_timeout = nv->login_timeout;
4120 /* Set minimum RATOV to 100 tenths of a second. */
4123 ha->loop_reset_delay = nv->reset_delay;
4125 /* Link Down Timeout = 0:
4127 * When Port Down timer expires we will start returning
4128 * I/O's to OS with "DID_NO_CONNECT".
4130 * Link Down Timeout != 0:
4132 * The driver waits for the link to come up after link down
4133 * before returning I/Os to OS with "DID_NO_CONNECT".
4135 if (nv->link_down_timeout == 0) {
4136 ha->loop_down_abort_time =
4137 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4139 ha->link_down_timeout = nv->link_down_timeout;
4140 ha->loop_down_abort_time =
4141 (LOOP_DOWN_TIME - ha->link_down_timeout);
4145 * Need enough time to try and get the port back.
4147 ha->port_down_retry_count = nv->port_down_retry_count;
4148 if (qlport_down_retry)
4149 ha->port_down_retry_count = qlport_down_retry;
4150 /* Set login_retry_count */
4151 ha->login_retry_count = nv->retry_count;
4152 if (ha->port_down_retry_count == nv->port_down_retry_count &&
4153 ha->port_down_retry_count > 3)
4154 ha->login_retry_count = ha->port_down_retry_count;
4155 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4156 ha->login_retry_count = ha->port_down_retry_count;
4157 if (ql2xloginretrycount)
4158 ha->login_retry_count = ql2xloginretrycount;
4160 icb->lun_enables = cpu_to_le16(0);
4161 icb->command_resource_count = 0;
4162 icb->immediate_notify_resource_count = 0;
4163 icb->timeout = cpu_to_le16(0);
4165 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4167 icb->firmware_options[0] &= ~BIT_3;
4168 icb->add_firmware_options[0] &=
4169 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
4170 icb->add_firmware_options[0] |= BIT_2;
4171 icb->response_accumulation_timer = 3;
4172 icb->interrupt_delay_timer = 5;
4174 vha->flags.process_response_queue = 1;
4177 if (!vha->flags.init_done) {
4178 ha->zio_mode = icb->add_firmware_options[0] &
4179 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4180 ha->zio_timer = icb->interrupt_delay_timer ?
4181 icb->interrupt_delay_timer: 2;
4183 icb->add_firmware_options[0] &=
4184 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
4185 vha->flags.process_response_queue = 0;
4186 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4187 ha->zio_mode = QLA_ZIO_MODE_6;
4189 ql_log(ql_log_info, vha, 0x0068,
4190 "ZIO mode %d enabled; timer delay (%d us).\n",
4191 ha->zio_mode, ha->zio_timer * 100);
4193 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
4194 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
4195 vha->flags.process_response_queue = 1;
4200 ql_log(ql_log_warn, vha, 0x0069,
4201 "NVRAM configuration failed.\n");
/*
 * qla2x00_rport_del() - Detach and delete the FC transport rport bound to
 * an fcport.  Prefers the staged "deferred" rport (drport) when one exists;
 * the pointer swap is done under host_lock so no other context can see a
 * half-detached rport.
 */
static void
qla2x00_rport_del(void *data)
{
	fc_port_t *fcport = data;
	struct fc_rport *rport;
	unsigned long flags;

	/* Snapshot and clear the rport pointer atomically w.r.t. host_lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	rport = fcport->drport ? fcport->drport: fcport->rport;
	fcport->drport = NULL;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
	if (rport) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
		    "%s %8phN. rport %p roles %x\n",
		    __func__, fcport->port_name, rport,
		    rport->roles);

		/* Unregister from the FC transport; may sleep. */
		fc_remote_port_delete(rport);
	}
}
4228 * qla2x00_alloc_fcport() - Allocate a generic fcport.
4230 * @flags: allocation flags
4232 * Returns a pointer to the allocated fcport, or NULL, if none available.
4235 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4239 fcport = kzalloc(sizeof(fc_port_t), flags);
4243 /* Setup fcport template structure. */
4245 fcport->port_type = FCT_UNKNOWN;
4246 fcport->loop_id = FC_NO_LOOP_ID;
4247 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
4248 fcport->supported_classes = FC_COS_UNSPECIFIED;
4250 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4251 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4253 fcport->disc_state = DSC_DELETED;
4254 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4255 fcport->deleted = QLA_SESS_DELETED;
4256 fcport->login_retry = vha->hw->login_retry_count;
4257 fcport->login_retry = 5;
4258 fcport->logout_on_delete = 1;
4260 if (!fcport->ct_desc.ct_sns) {
4261 ql_log(ql_log_warn, vha, 0xd049,
4262 "Failed to allocate ct_sns request.\n");
4266 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4267 INIT_LIST_HEAD(&fcport->gnl_entry);
4268 INIT_LIST_HEAD(&fcport->list);
/*
 * qla2x00_free_fcport() - Release an fcport and its CT/SNS DMA buffer.
 * @fcport: port structure to free (allocated by qla2x00_alloc_fcport()).
 */
void
qla2x00_free_fcport(fc_port_t *fcport)
{
	/* Free the coherent CT/SNS request buffer, if one was allocated. */
	if (fcport->ct_desc.ct_sns) {
		dma_free_coherent(&fcport->vha->hw->pdev->dev,
			sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
			fcport->ct_desc.ct_sns_dma);
		fcport->ct_desc.ct_sns = NULL;
	}
	kfree(fcport);
}
4287 * qla2x00_configure_loop
4288 * Updates Fibre Channel Device Database with what is actually on loop.
4291 * ha = adapter block pointer.
4296 * 2 = database was full and device was not configured.
/*
 * qla2x00_configure_loop() - Bring the port database in sync with what is
 * actually on the loop/fabric.  Decodes the pending dpc_flags into local
 * loop and/or RSCN (fabric) updates based on the current topology, runs
 * the appropriate scan, and restores the request bits if a resync fired
 * while we were working.
 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int  rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;
	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/* Keep a copy so the request bits can be re-armed on resync. */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
		/* FL port: loop update implies fabric scan too. */
		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
		/* Pure fabric: only the RSCN path applies. */
		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
	} else if (ha->current_topology == ISP_CFG_NL) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Unknown state: do both scans. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			/* Link dropped mid-scan; report failure. */
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				/* 83xx/27xx use a dedicated ATIO lock. */
				if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
					spin_lock_irqsave(&ha->tgt.atio_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 0);
					spin_unlock_irqrestore(
					    &ha->tgt.atio_lock, flags);
				} else {
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);
					qlt_24xx_process_atio_queue(vha, 1);
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
				}
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
4426 * Updates Fibre Channel Device Database with local loop devices.
4429 * ha = adapter block pointer.
/*
 * qla24xx_n2n_handle_login() - Drive login for N2N (point-to-point,
 * no-switch) topology.  The port with the greater WWPN initiates PLOGI;
 * otherwise the remote login state from the firmware port database decides
 * whether we proceed (GPDB/PRLI) or wait for the next database change.
 */
static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
				    fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	int res = QLA_SUCCESS, rval;
	int greater_wwpn = 0;
	int logged_in = 0;

	/* Only applicable in N2N topology. */
	if (ha->current_topology != ISP_CFG_N)
		return res;

	if (wwn_to_u64(vha->port_name) >
	    wwn_to_u64(vha->n2n_port_name)) {
		ql_dbg(ql_dbg_disc, vha, 0x2002,
		    "HBA WWPN is greater %llx > target %llx\n",
		    wwn_to_u64(vha->port_name),
		    wwn_to_u64(vha->n2n_port_name));
		greater_wwpn = 1;
		/* Higher WWPN initiates; address the peer's assigned id. */
		fcport->d_id.b24 = vha->n2n_id;
	}

	fcport->loop_id = vha->loop_id;
	fcport->fc4f_nvme = 0;

	ql_dbg(ql_dbg_disc, vha, 0x4001,
	    "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
	    fcport->d_id.b24, vha->loop_id);

	/* Fill in member data. */
	if (!greater_wwpn) {
		rval = qla2x00_get_port_database(vha, fcport, 0);
		ql_dbg(ql_dbg_disc, vha, 0x1051,
		    "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
		    fcport->current_login_state, fcport->last_login_state,
		    fcport->d_id.b24, fcport->loop_id, rval);

		/* 0x4 = PLOGI complete, 0x6 = PRLI complete. */
		if (((fcport->current_login_state & 0xf) == 0x4) ||
		    ((fcport->current_login_state & 0xf) == 0x6))
			logged_in = 1;
	}

	if (logged_in || greater_wwpn) {
		if (!vha->nvme_local_port && vha->flags.nvme_enabled)
			qla_nvme_register_hba(vha);

		/* Set connected N_Port d_id */
		if (vha->flags.nvme_enabled)
			fcport->fc4f_nvme = 1;

		fcport->scan_state = QLA_FCPORT_FOUND;
		fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		fcport->disc_state = DSC_GNL;
		fcport->n2n_flag = 1;
		/* GPSC needs a switch; not available in N2N. */
		vha->hw->flags.gpsc_supported = 0;

		if (greater_wwpn) {
			ql_dbg(ql_dbg_disc, vha, 0x20e5,
			    "%s %d PLOGI ELS %8phC\n",
			    __func__, __LINE__, fcport->port_name);

			res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
			    fcport, fcport->d_id);
		}

		if (res != QLA_SUCCESS) {
			ql_log(ql_log_info, vha, 0xd04d,
			    "PLOGI Failed: portid=%06x - retrying\n",
			    fcport->d_id.b24);
			res = QLA_SUCCESS;
		}

		/* State 0x6 means FCP PRLI complete */
		if ((fcport->current_login_state & 0xf) == 0x6) {
			ql_dbg(ql_dbg_disc, vha, 0x2118,
			    "%s %d %8phC post GPDB work\n",
			    __func__, __LINE__, fcport->port_name);
			fcport->chip_reset =
			    vha->hw->base_qpair->chip_reset;
			qla24xx_post_gpdb_work(vha, fcport, 0);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x2118,
			    "%s %d %8phC post NVMe PRLI\n",
			    __func__, __LINE__, fcport->port_name);
			qla24xx_post_prli_work(vha, fcport);
		}
	} else {
		/* Wait for next database change */
		set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
	}

	return res;
}
4529 * qla2x00_configure_local_loop
4530 * Updates Fibre Channel Device Database with local loop devices.
4533 * ha = adapter block pointer.
/*
 * qla2x00_configure_local_loop() - Update the port database with the
 * devices currently logged in on the local loop (firmware ID list),
 * marking previously-known local devices lost, handling the N2N special
 * case, and registering any newly discovered ports.
 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int		rval, rval2;
	int		found_devs;
	int		found;

	fc_port_t	*fcport, *new_fcport;

	uint16_t	index;
	uint16_t	entries;
	char		*id_iter;
	uint16_t	loop_id;
	uint8_t		domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto cleanup_allocation;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    (uint8_t *)ha->gid_list,
	    entries * sizeof(struct gid_list_info));

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto cleanup_allocation;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/*
	 * Mark local devices that were present with FCF_DEVICE_LOST for now.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (atomic_read(&fcport->state) == FCS_ONLINE &&
		    fcport->port_type != FCT_BROADCAST &&
		    (fcport->flags & FCF_FABRIC_DEVICE) == 0) {

			ql_dbg(ql_dbg_disc, vha, 0x2096,
			    "Marking port lost loop_id=0x%04x.\n",
			    fcport->loop_id);

			qla2x00_mark_device_lost(vha, fcport, 0, 0);
		}
	}

	/* Initiate N2N login. */
	if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
		rval = qla24xx_n2n_handle_login(vha, new_fcport);
		if (rval != QLA_SUCCESS)
			goto cleanup_allocation;
		return QLA_SUCCESS;
	}

	/* Add devices to port list. */
	id_iter = (char *)ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = ((struct gid_list_info *)id_iter)->domain;
		area = ((struct gid_list_info *)id_iter)->area;
		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
		/* 2100/2200 carry an 8-bit loop id; later parts 16-bit LE. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = (uint16_t)
			    ((struct gid_list_info *)id_iter)->loop_id_2100;
		else
			loop_id = le16_to_cpu(
			    ((struct gid_list_info *)id_iter)->loop_id);
		id_iter += ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain &&
		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
			continue;

		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Known port: refresh its identity in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);

			if (!fcport->login_succ) {
				vha->fcport_count++;
				fcport->login_succ = 1;
				fcport->disc_state = DSC_LOGIN_COMPLETE;
			}

			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			fcport = new_fcport;
			if (!fcport->login_succ) {
				vha->fcport_count++;
				fcport->login_succ = 1;
				fcport->disc_state = DSC_LOGIN_COMPLETE;
			}
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto cleanup_allocation;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		qla2x00_update_fcport(vha, fcport);

		found_devs++;
	}

cleanup_allocation:
	kfree(new_fcport);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2098,
		    "Configure local loop error exit: rval=%x.\n", rval);
	}

	return (rval);
}
/*
 * qla2x00_iidma_fcport() - Adjust the firmware's per-port iIDMA link speed
 * to match the remote port's reported speed.  No-op on non-iIDMA hardware,
 * offline ports, and ports whose speed is unknown or exceeds the HBA rate.
 */
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	int rval;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_IIDMA_CAPABLE(ha))
		return;

	if (atomic_read(&fcport->state) != FCS_ONLINE)
		return;

	/* Never program a speed above what the link itself runs at. */
	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
	    fcport->fp_speed > ha->link_data_rate)
		return;

	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
	    mb);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2004,
		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2005,
		    "iIDMA adjusted to %s GB/s on %8phN.\n",
		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
		    fcport->port_name);
	}
}
/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
/*
 * Register an fcport with the FC transport layer as a remote port, stash
 * the fcport pointer in the rport's dd_data, and advertise its role
 * (initiator/target) to the transport.
 */
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* Back-link the rport to our fcport under host_lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);

	rport->supported_classes = fcport->supported_classes;

	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s %8phN. rport %p is %s mode\n",
	    __func__, fcport->port_name, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");

	fc_remote_port_rolechg(rport, rport_ids.roles);
}
4800 * qla2x00_update_fcport
4801 * Updates device on list.
4804 * ha = adapter block pointer.
4805 * fcport = port structure pointer.
/*
 * qla2x00_update_fcport() - Finalize a logged-in fcport: mark it online,
 * tune iIDMA/FCP priority, then register it with the FC transport and/or
 * the target core depending on the host's active mode.  NVMe ports are
 * handed to the NVMe-FC layer instead.
 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	fcport->vha = vha;

	/* Well-known / reserved addresses are not real devices. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	if (IS_QLAFX00(vha->hw)) {
		/* ISPFx00: no iIDMA/prio handling; go straight to register. */
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		goto reg_port;
	}
	fcport->login_retry = 0;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->disc_state = DSC_LOGIN_COMPLETE;
	fcport->deleted = 0;
	fcport->logout_on_delete = 1;

	if (fcport->fc4f_nvme) {
		/* NVMe ports register with the NVMe-FC transport instead. */
		qla_nvme_register_remote(vha, fcport);
		return;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);

reg_port:
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		/* Dual mode: both transport rport and target session. */
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}
}
4866 * qla2x00_configure_fabric
4867 * Setup SNS devices with loop ID's.
4870 * ha = adapter block pointer.
/*
 * qla2x00_configure_fabric() - Discover and set up fabric (SNS) devices.
 * Probes for an FL/F port to detect a switch, logs into the name server,
 * performs the FC-GS registrations (RFT/RFF/RNN/RSNN, plus NVMe features),
 * and then walks the fabric via qla2x00_find_all_fabric_devs().
 */
static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
	int	rval;
	fc_port_t	*fcport;
	uint16_t	mb[MAILBOX_REGISTER_COUNT];
	uint16_t	loop_id;
	LIST_HEAD(new_fcports);
	struct qla_hw_data *ha = vha->hw;
	int		discovery_gen;

	/* If FL port exists, then SNS is present */
	if (IS_FWI2_CAPABLE(ha))
		loop_id = NPH_F_PORT;
	else
		loop_id = SNS_FL_PORT;
	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x20a0,
		    "MBX_GET_PORT_NAME failed, No FL Port.\n");

		/* No switch: not an error, just nothing fabric to do. */
		vha->device_flags &= ~SWITCH_FOUND;
		return (QLA_SUCCESS);
	}
	vha->device_flags |= SWITCH_FOUND;

	/* Target/dual mode needs the switch to forward RSCNs to us. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		rval = qla2x00_send_change_request(vha, 0x3, 0);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x121,
				"Failed to enable receiving of RSCN requests: 0x%x.\n",
				rval);
	}

	do {
		qla2x00_mgmt_svr_login(vha);

		/* FDMI support. */
		if (ql2xfdmienable &&
		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
			qla2x00_fdmi_register(vha);

		/* Ensure we are logged into the SNS. */
		loop_id = NPH_SNS_LID(ha);
		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
		    0xfc, mb, BIT_1|BIT_0);
		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
			ql_dbg(ql_dbg_disc, vha, 0x20a1,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			return rval;
		}
		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
			if (qla2x00_rft_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x20a2,
				    "Register FC-4 TYPE failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209a,
				    "Register FC-4 Features failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			}
			if (vha->flags.nvme_enabled) {
				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
					ql_dbg(ql_dbg_disc, vha, 0x2049,
					    "Register NVME FC Type Features failed.\n");
				}
			}
			if (qla2x00_rnn_id(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x2104,
				    "Register Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED,
				    &vha->dpc_flags))
					break;
			} else if (qla2x00_rsnn_nn(vha)) {
				/* EMPTY */
				ql_dbg(ql_dbg_disc, vha, 0x209b,
				    "Register Symbolic Node Name failed.\n");
				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
					break;
			}
		}

		/* Every known port starts unseen; the scan marks survivors. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			fcport->scan_state = QLA_FCPORT_SCAN;
		}

		/* Mark the time right before querying FW for connected ports.
		 * This process is long, asynchronous and by the time it's done,
		 * collected information might not be accurate anymore. E.g.
		 * disconnected port might have re-connected and a brand new
		 * session has been created. In this case session's generation
		 * will be newer than discovery_gen. */
		qlt_do_generation_tick(vha, &discovery_gen);

		rval = qla2x00_find_all_fabric_devs(vha);
		if (rval != QLA_SUCCESS)
			break;
	} while (0);

	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
		qla_nvme_register_hba(vha);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x2068,
		    "Configure fabric error exit rval=%d.\n", rval);

	return (rval);
}
4998 * qla2x00_find_all_fabric_devs
5001 * ha = adapter block pointer.
5002 * dev = database device entry pointer.
/*
 * qla2x00_find_all_fabric_devs() - Walk the fabric name server and
 * reconcile its contents with our port list.  Uses GID_PT (plus GPN/GNN/
 * GFPN/GFF lookups) when available, falling back to iterative GA_NXT.
 * Ports found get their identity refreshed or are added; ports that were
 * not found are marked lost / scheduled for session deletion.
 */
static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
	int		rval;
	uint16_t	loop_id;
	fc_port_t	*fcport, *new_fcport;
	int		found;

	sw_info_t	*swl;
	int		swl_idx;
	int		first_dev, last_dev;
	port_id_t	wrap = {}, nxt_d_id;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	rval = QLA_SUCCESS;

	/* Try GID_PT to get device list, else GAN. */
	if (!ha->swl)
		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
		    GFP_KERNEL);
	swl = ha->swl;
	if (!swl) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x209c,
		    "GID_PT allocations failed, fallback on GA_NXT.\n");
	} else {
		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
			swl = NULL;
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}

		/* If other queries succeeded probe for FC-4 type */
		if (swl) {
			qla2x00_gff_id(vha, swl);
			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
				return rval;
		}
	}
	swl_idx = 0;

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x209d,
		    "Failed to allocate memory for fcport.\n");
		return (QLA_MEMORY_ALLOC_FAILED);
	}
	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
	/* Set start port ID scan at adapter ID. */
	first_dev = 1;
	last_dev = 0;

	/* Starting free loop ID. */
	loop_id = ha->min_external_loopid;
	for (; loop_id <= ha->max_loop_id; loop_id++) {
		if (qla2x00_is_reserved_id(vha, loop_id))
			continue;

		if (ha->current_topology == ISP_CFG_FL &&
		    (atomic_read(&vha->loop_down_timer) ||
		     LOOP_TRANSITION(vha))) {
			/* Link went away mid-scan; request a fresh resync. */
			atomic_set(&vha->loop_down_timer, 0);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			break;
		}

		if (swl != NULL) {
			if (last_dev) {
				wrap.b24 = new_fcport->d_id.b24;
			} else {
				/* Pull the next entry out of the GID_PT results. */
				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
				memcpy(new_fcport->node_name,
				    swl[swl_idx].node_name, WWN_SIZE);
				memcpy(new_fcport->port_name,
				    swl[swl_idx].port_name, WWN_SIZE);
				memcpy(new_fcport->fabric_port_name,
				    swl[swl_idx].fabric_port_name, WWN_SIZE);
				new_fcport->fp_speed = swl[swl_idx].fp_speed;
				new_fcport->fc4_type = swl[swl_idx].fc4_type;

				new_fcport->nvme_flag = 0;
				new_fcport->fc4f_nvme = 0;
				if (vha->flags.nvme_enabled &&
				    swl[swl_idx].fc4f_nvme) {
					new_fcport->fc4f_nvme =
					    swl[swl_idx].fc4f_nvme;
					ql_log(ql_log_info, vha, 0x2131,
					    "FOUND: NVME port %8phC as FC Type 28h\n",
					    new_fcport->port_name);
				}

				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
					last_dev = 1;
				}
				swl_idx++;
			}
		} else {
			/* Send GA_NXT to the switch */
			rval = qla2x00_ga_nxt(vha, new_fcport);
			if (rval != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x209e,
				    "SNS scan failed -- assuming "
				    "zero-entry result.\n");
				rval = QLA_SUCCESS;
				break;
			}
		}

		/* If wrap on switch device list, exit. */
		if (first_dev) {
			wrap.b24 = new_fcport->d_id.b24;
			first_dev = 0;
		} else if (new_fcport->d_id.b24 == wrap.b24) {
			ql_dbg(ql_dbg_disc, vha, 0x209f,
			    "Device wrap (%02x%02x%02x).\n",
			    new_fcport->d_id.b.domain,
			    new_fcport->d_id.b.area,
			    new_fcport->d_id.b.al_pa);
			break;
		}

		/* Bypass if same physical adapter. */
		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
			continue;

		/* Bypass virtual ports of the same host. */
		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
			continue;

		/* Bypass if same domain and area of adapter. */
		if (((new_fcport->d_id.b24 & 0xffff00) ==
		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
			ISP_CFG_FL)
			continue;

		/* Bypass reserved domain fields. */
		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
			continue;

		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
		if (ql2xgffidenable &&
		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
			continue;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			fcport->scan_state = QLA_FCPORT_FOUND;

			found++;

			/* Update port state. */
			memcpy(fcport->fabric_port_name,
			    new_fcport->fabric_port_name, WWN_SIZE);
			fcport->fp_speed = new_fcport->fp_speed;

			/*
			 * If address the same and state FCS_ONLINE
			 * (or in target mode), nothing changed.
			 */
			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
			    (atomic_read(&fcport->state) == FCS_ONLINE ||
			     (vha->host->active_mode == MODE_TARGET))) {
				break;
			}

			/*
			 * If device was not a fabric device before.
			 */
			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
				fcport->d_id.b24 = new_fcport->d_id.b24;
				qla2x00_clear_loop_id(fcport);
				fcport->flags |= (FCF_FABRIC_DEVICE |
				    FCF_LOGIN_NEEDED);
				break;
			}

			/*
			 * Port ID changed or device was marked to be updated;
			 * Log it out if still logged in and mark it for
			 * relogin later.
			 */
			if (qla_tgt_mode_enabled(base_vha)) {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
				    "port changed FC ID, %8phC"
				    " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
				    fcport->port_name,
				    fcport->d_id.b.domain,
				    fcport->d_id.b.area,
				    fcport->d_id.b.al_pa,
				    fcport->loop_id,
				    new_fcport->d_id.b.domain,
				    new_fcport->d_id.b.area,
				    new_fcport->d_id.b.al_pa);
				fcport->d_id.b24 = new_fcport->d_id.b24;
				break;
			}

			fcport->d_id.b24 = new_fcport->d_id.b24;
			fcport->flags |= FCF_LOGIN_NEEDED;
			break;
		}

		if (found) {
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			continue;
		}
		/* If device was not in our fcports list, then add it. */
		new_fcport->scan_state = QLA_FCPORT_FOUND;
		list_add_tail(&new_fcport->list, &vha->vp_fcports);

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Allocate a new replacement fcport. */
		nxt_d_id.b24 = new_fcport->d_id.b24;
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL) {
			ql_log(ql_log_warn, vha, 0xd032,
			    "Memory allocation failed for fcport.\n");
			return (QLA_MEMORY_ALLOC_FAILED);
		}
		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
		new_fcport->d_id.b24 = nxt_d_id.b24;
	}

	qla2x00_free_fcport(new_fcport);

	/*
	 * Logout all previous fabric dev marked lost, except FCP2 devices.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
		    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
			continue;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
				    ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion_lock
						(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

	return (rval);
}
5302 * qla2x00_find_new_loop_id
5303 * Scan through our port list and find a new usable loop ID.
5306 * ha: adapter state pointer.
5307 * dev: port structure pointer.
5310 * qla2x00 local function return status code.
/*
 * qla2x00_find_new_loop_id - allocate the next free loop ID for @dev.
 *
 * Scans ha->loop_id_map (under ha->vport_slock) for the first clear bit.
 * If the found ID is out of range or reserved, dev->loop_id is set to
 * FC_NO_LOOP_ID and QLA_FUNCTION_FAILED is returned; otherwise the bit
 * is marked used and the ID is assigned to dev->loop_id.
 * NOTE(review): several original source lines are elided in this view;
 * comments describe only the visible fragment.
 */
5316 qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
5319 struct qla_hw_data *ha = vha->hw;
5320 unsigned long flags = 0;
/* Loop-ID map is shared across vports; protect with vport_slock. */
5324 spin_lock_irqsave(&ha->vport_slock, flags);
5326 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
/* Reject IDs beyond the map or in the firmware-reserved range. */
5328 if (dev->loop_id >= LOOPID_MAP_SIZE ||
5329 qla2x00_is_reserved_id(vha, dev->loop_id)) {
5330 dev->loop_id = FC_NO_LOOP_ID;
5331 rval = QLA_FUNCTION_FAILED;
5333 set_bit(dev->loop_id, ha->loop_id_map);
5335 spin_unlock_irqrestore(&ha->vport_slock, flags);
/* Log the outcome outside the lock. */
5337 if (rval == QLA_SUCCESS)
5338 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
5339 "Assigning new loopid=%x, portid=%x.\n",
5340 dev->loop_id, dev->d_id.b24);
5342 ql_log(ql_log_warn, dev->vha, 0x2087,
5343 "No loop_id's available, portid=%x.\n",
5351 * qla2x00_fabric_login
5352 * Issue fabric login command.
5355 * ha = adapter block pointer.
5356 * device = pointer to FC device type structure.
5359 * 0 - Login successfully
5361 * 2 - Initiator device
/*
 * qla2x00_fabric_login - issue a fabric (switch) login for @fcport.
 *
 * Drives ha->isp_ops->fabric_login() and interprets the mailbox status:
 *  - MBS_PORT_ID_USED: firmware says the port already has another loop
 *    ID; retry with the ID returned in mb[1].
 *  - MBS_COMMAND_COMPLETE: success; classify port type and class-of-
 *    service support from mb[1].
 *  - MBS_LOOP_ID_USED: pick a fresh loop ID and retry.
 *  - MBS_COMMAND_ERROR: log the port out and mark the device lost.
 * @next_loopid is updated so the caller can continue scanning from a
 * sensible ID on the next attempt.
 * NOTE(review): interior lines (loop construct, returns) are elided in
 * this view; comments describe only the visible fragment.
 */
5365 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
5366 uint16_t *next_loopid)
5370 uint16_t tmp_loopid;
5371 uint16_t mb[MAILBOX_REGISTER_COUNT];
5372 struct qla_hw_data *ha = vha->hw;
5378 ql_dbg(ql_dbg_disc, vha, 0x2000,
5379 "Trying Fabric Login w/loop id 0x%04x for port "
5381 fcport->loop_id, fcport->d_id.b.domain,
5382 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5384 /* Login fcport on switch. */
5385 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
5386 fcport->d_id.b.domain, fcport->d_id.b.area,
5387 fcport->d_id.b.al_pa, mb, BIT_0);
5388 if (rval != QLA_SUCCESS) {
5391 if (mb[0] == MBS_PORT_ID_USED) {
5393 * Device has another loop ID. The firmware team
5394 * recommends the driver perform an implicit login with
5395 * the specified ID again. The ID we just used is save
5396 * here so we return with an ID that can be tried by
5400 tmp_loopid = fcport->loop_id;
5401 fcport->loop_id = mb[1];
5403 ql_dbg(ql_dbg_disc, vha, 0x2001,
5404 "Fabric Login: port in use - next loop "
5405 "id=0x%04x, port id= %02x%02x%02x.\n",
5406 fcport->loop_id, fcport->d_id.b.domain,
5407 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5409 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
5414 /* A retry occurred before. */
5415 *next_loopid = tmp_loopid;
5418 * No retry occurred before. Just increment the
5419 * ID value for next login.
5421 *next_loopid = (fcport->loop_id + 1);
/* mb[1] BIT_0 distinguishes initiator from target ports. */
5424 if (mb[1] & BIT_0) {
5425 fcport->port_type = FCT_INITIATOR;
5427 fcport->port_type = FCT_TARGET;
/* BIT_1 set on targets indicates FCP-2 (tape/retry) capability. */
5428 if (mb[1] & BIT_1) {
5429 fcport->flags |= FCF_FCP2_DEVICE;
5434 fcport->supported_classes |= FC_COS_CLASS2;
5436 fcport->supported_classes |= FC_COS_CLASS3;
5438 if (IS_FWI2_CAPABLE(ha)) {
5441 FCF_CONF_COMP_SUPPORTED;
5446 } else if (mb[0] == MBS_LOOP_ID_USED) {
5448 * Loop ID already used, try next loop ID.
5451 rval = qla2x00_find_new_loop_id(vha, fcport);
5452 if (rval != QLA_SUCCESS) {
5453 /* Ran out of loop IDs to use */
5456 } else if (mb[0] == MBS_COMMAND_ERROR) {
5458 * Firmware possibly timed out during login. If NO
5459 * retries are left to do then the device is declared
/* Explicit logout so firmware state matches the driver's view. */
5462 *next_loopid = fcport->loop_id;
5463 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
5464 fcport->d_id.b.domain, fcport->d_id.b.area,
5465 fcport->d_id.b.al_pa);
5466 qla2x00_mark_device_lost(vha, fcport, 1, 0);
5472 * unrecoverable / not handled error
5474 ql_dbg(ql_dbg_disc, vha, 0x2002,
5475 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
5476 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
5477 fcport->d_id.b.area, fcport->d_id.b.al_pa,
5478 fcport->loop_id, jiffies);
5480 *next_loopid = fcport->loop_id;
5481 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
5482 fcport->d_id.b.domain, fcport->d_id.b.area,
5483 fcport->d_id.b.al_pa);
/* Give up on this port: free its loop ID and stop retrying. */
5484 qla2x00_clear_loop_id(fcport);
5485 fcport->login_retry = 0;
5496 * qla2x00_local_device_login
5497 * Issue local device login command.
5500 * ha = adapter block pointer.
5501 * loop_id = loop id of device to login to.
5503 * Returns (Where's the #define!!!!):
5504 * 0 - Login successfully
/*
 * qla2x00_local_device_login - log in to a device on the local loop.
 *
 * Issues the login-local-device mailbox command and, on mailbox
 * success, inspects mb[0] for command-level errors (device retryable
 * vs. not present in the firmware's PCB table).
 * NOTE(review): the branch bodies and return are elided in this view.
 */
5509 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
5512 uint16_t mb[MAILBOX_REGISTER_COUNT];
5514 memset(mb, 0, sizeof(mb));
5515 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
5516 if (rval == QLA_SUCCESS) {
5517 /* Interrogate mailbox registers for any errors */
5518 if (mb[0] == MBS_COMMAND_ERROR)
5520 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
5521 /* device not in PCB table */
5529 * qla2x00_loop_resync
5530 * Resync with fibre channel devices.
5533 * ha = adapter block pointer.
5539 qla2x00_loop_resync(scsi_qla_host_t *vha)
5541 int rval = QLA_SUCCESS;
5543 struct req_que *req;
5544 struct rsp_que *rsp;
5549 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5550 if (vha->flags.online) {
5551 if (!(rval = qla2x00_fw_ready(vha))) {
5552 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5555 if (!IS_QLAFX00(vha->hw)) {
5557 * Issue a marker after FW becomes
5560 qla2x00_marker(vha, req, rsp, 0, 0,
5562 vha->marker_needed = 0;
5565 /* Remap devices on Loop. */
5566 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5568 if (IS_QLAFX00(vha->hw))
5569 qlafx00_configure_devices(vha);
5571 qla2x00_configure_loop(vha);
5574 } while (!atomic_read(&vha->loop_down_timer) &&
5575 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5576 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
5581 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
5582 return (QLA_FUNCTION_FAILED);
5585 ql_dbg(ql_dbg_disc, vha, 0x206c,
5586 "%s *** FAILED ***.\n", __func__);
5592 * qla2x00_perform_loop_resync
5593 * Description: This function will set the appropriate flags and call
5594 * qla2x00_loop_resync. If successful loop will be resynced
5595 * Arguments : scsi_qla_host_t pointer
5596 * returm : Success or Failure
/*
 * qla2x00_perform_loop_resync - flag-setting wrapper around
 * qla2x00_loop_resync().
 *
 * Takes LOOP_RESYNC_ACTIVE (skipping if a resync is already running),
 * primes the loop-state/dpc flags so the resync proceeds, runs the
 * resync, and marks the loop dead on failure.
 * NOTE(review): return statement is elided in this view.
 */
5599 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
5603 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
5604 /*Configure the flags so that resync happens properly*/
5605 atomic_set(&ha->loop_down_timer, 0);
/* Only drive the loop up if a cable is actually present. */
5606 if (!(ha->device_flags & DFLG_NO_CABLE)) {
5607 atomic_set(&ha->loop_state, LOOP_UP);
5608 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
5609 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
5610 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
5612 rval = qla2x00_loop_resync(ha);
5614 atomic_set(&ha->loop_state, LOOP_DEAD);
5616 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
/*
 * qla2x00_update_fcports - process deferred rport removals on all vports.
 *
 * Walks every vport on the HBA and every fcport on each vport; for
 * fcports with a pending drport (deferred fc_remote_port delete) in a
 * configured state, drops vport_slock, performs the rport delete, then
 * re-takes the lock. vref_count pins the vport across the unlocked call.
 */
6623 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
6626 struct scsi_qla_host *vha;
6627 struct qla_hw_data *ha = base_vha->hw;
6628 unsigned long flags;
6630 spin_lock_irqsave(&ha->vport_slock, flags);
6631 /* Go with deferred removal of rport references. */
6632 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
6633 atomic_inc(&vha->vref_count);
6634 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6635 if (fcport->drport &&
6636 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
/* qla2x00_rport_del() may sleep; drop the spinlock around it. */
6637 spin_unlock_irqrestore(&ha->vport_slock, flags);
6638 qla2x00_rport_del(fcport);
6640 spin_lock_irqsave(&ha->vport_slock, flags);
6643 atomic_dec(&vha->vref_count);
6644 wake_up(&vha->vref_waitq);
6646 spin_unlock_irqrestore(&ha->vport_slock, flags);
5649 /* Assumes idc_lock always held on entry */
/*
 * qla83xx_reset_ownership - decide whether this function owns the
 * NIC-core reset (caller must hold the IDC lock).
 *
 * Reads the drv-presence and device-partition-info registers (via the
 * 8044 direct path or the 83xx register path), locates the other FCoE
 * function number (if any) from the 4-bit class-type fields, and claims
 * reset ownership iff no non-FCoE drivers are present and this port has
 * the lowest FCoE function number.
 */
5651 qla83xx_reset_ownership(scsi_qla_host_t *vha)
5653 struct qla_hw_data *ha = vha->hw;
5654 uint32_t drv_presence, drv_presence_mask;
5655 uint32_t dev_part_info1, dev_part_info2, class_type;
5656 uint32_t class_type_mask = 0x3;
5657 uint16_t fcoe_other_function = 0xffff, i;
/* ISP8044 uses direct CRB reads; other 83xx parts use qla83xx_rd_reg. */
5659 if (IS_QLA8044(ha)) {
5660 drv_presence = qla8044_rd_direct(vha,
5661 QLA8044_CRB_DRV_ACTIVE_INDEX);
5662 dev_part_info1 = qla8044_rd_direct(vha,
5663 QLA8044_CRB_DEV_PART_INFO_INDEX);
5664 dev_part_info2 = qla8044_rd_direct(vha,
5665 QLA8044_CRB_DEV_PART_INFO2);
5667 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5668 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
5669 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
/* PARTINFO1 covers functions 0-7, 4 bits of class type each. */
5671 for (i = 0; i < 8; i++) {
5672 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
5673 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5674 (i != ha->portnum)) {
5675 fcoe_other_function = i;
/* Not found in 0-7: PARTINFO2 covers functions 8-15. */
5679 if (fcoe_other_function == 0xffff) {
5680 for (i = 0; i < 8; i++) {
5681 class_type = ((dev_part_info2 >> (i * 4)) &
5683 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
5684 ((i + 8) != ha->portnum)) {
5685 fcoe_other_function = i + 8;
5691 * Prepare drv-presence mask based on fcoe functions present.
5692 * However consider only valid physical fcoe function numbers (0-15).
5694 drv_presence_mask = ~((1 << (ha->portnum)) |
5695 ((fcoe_other_function == 0xffff) ?
5696 0 : (1 << (fcoe_other_function))));
5698 /* We are the reset owner iff:
5699 * - No other protocol drivers present.
5700 * - This is the lowest among fcoe functions. */
5701 if (!(drv_presence & drv_presence_mask) &&
5702 (ha->portnum < fcoe_other_function)) {
5703 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
5704 "This host is Reset owner.\n");
5705 ha->flags.nic_core_reset_owner = 1;
/*
 * __qla83xx_set_drv_ack - set this function's bit in the IDC
 * driver-acknowledge register (read-modify-write; caller is expected
 * to hold the IDC lock, per the file's __-prefix convention).
 * Returns the rd/wr register status.
 */
5710 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
5712 int rval = QLA_SUCCESS;
5713 struct qla_hw_data *ha = vha->hw;
5716 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5717 if (rval == QLA_SUCCESS) {
5718 drv_ack |= (1 << ha->portnum);
5719 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
/*
 * __qla83xx_clear_drv_ack - clear this function's bit in the IDC
 * driver-acknowledge register (inverse of __qla83xx_set_drv_ack;
 * caller is expected to hold the IDC lock).
 * Returns the rd/wr register status.
 */
5726 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
5728 int rval = QLA_SUCCESS;
5729 struct qla_hw_data *ha = vha->hw;
5732 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
5733 if (rval == QLA_SUCCESS) {
5734 drv_ack &= ~(1 << ha->portnum);
5735 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
/*
 * qla83xx_dev_state_to_string - map a QLA8XXX_DEV_* IDC device state
 * to a human-readable string for log messages.
 * NOTE(review): the return strings for READY/FAILED/QUIESCENT and the
 * default case are elided in this view.
 */
5742 qla83xx_dev_state_to_string(uint32_t dev_state)
5744 switch (dev_state) {
5745 case QLA8XXX_DEV_COLD:
5746 return "COLD/RE-INIT";
5747 case QLA8XXX_DEV_INITIALIZING:
5748 return "INITIALIZING";
5749 case QLA8XXX_DEV_READY:
5751 case QLA8XXX_DEV_NEED_RESET:
5752 return "NEED RESET";
5753 case QLA8XXX_DEV_NEED_QUIESCENT:
5754 return "NEED QUIESCENT";
5755 case QLA8XXX_DEV_FAILED:
5757 case QLA8XXX_DEV_QUIESCENT:
5764 /* Assumes idc-lock always held on entry */
/*
 * qla83xx_idc_audit - record an IDC audit entry (caller holds the IDC
 * lock).
 *
 * IDC_AUDIT_TIMESTAMP: store the current time (seconds) and write
 * portnum | type<<7 | timestamp<<8 to the audit register.
 * IDC_AUDIT_COMPLETION: compute elapsed seconds since the stored
 * timestamp and write portnum | type<<7 | duration<<8.
 */
5766 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
5768 struct qla_hw_data *ha = vha->hw;
5769 uint32_t idc_audit_reg = 0, duration_secs = 0;
5771 switch (audit_type) {
5772 case IDC_AUDIT_TIMESTAMP:
5773 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
5774 idc_audit_reg = (ha->portnum) |
5775 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
5776 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5779 case IDC_AUDIT_COMPLETION:
/* NOTE(review): idc_audit_ts was stored in seconds above but is fed
 * through jiffies_to_msecs() here - units look inconsistent; verify
 * against the full upstream source before relying on the duration. */
5780 duration_secs = ((jiffies_to_msecs(jiffies) -
5781 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
5782 idc_audit_reg = (ha->portnum) |
5783 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
5784 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
5788 ql_log(ql_log_warn, vha, 0xb078,
5789 "Invalid audit type specified.\n");
5794 /* Assumes idc_lock always held on entry */
/*
 * qla83xx_initiating_reset - begin a NIC-core reset (caller holds the
 * IDC lock).
 *
 * Fails fast if reset is administratively disabled in IDC control.
 * If this function is the reset owner and the device is READY, it
 * writes NEED_RESET and stamps the IDC audit; otherwise it waits
 * (dropping/retaking the IDC lock) for another function to move the
 * state out of READY, then acks via the drv-ack register.
 */
5796 qla83xx_initiating_reset(scsi_qla_host_t *vha)
5798 struct qla_hw_data *ha = vha->hw;
5799 uint32_t idc_control, dev_state;
5801 __qla83xx_get_idc_control(vha, &idc_control);
5802 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
5803 ql_log(ql_log_info, vha, 0xb080,
5804 "NIC Core reset has been disabled. idc-control=0x%x\n",
5806 return QLA_FUNCTION_FAILED;
5809 /* Set NEED-RESET iff in READY state and we are the reset-owner */
5810 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5811 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
5812 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
5813 QLA8XXX_DEV_NEED_RESET);
5814 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
5815 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
5817 const char *state = qla83xx_dev_state_to_string(dev_state);
5818 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
5820 /* SV: XXX: Is timeout required here? */
5821 /* Wait for IDC state change READY -> NEED_RESET */
5822 while (dev_state == QLA8XXX_DEV_READY) {
/* Release the IDC lock while polling so the owner can progress. */
5823 qla83xx_idc_unlock(vha, 0);
5825 qla83xx_idc_lock(vha, 0);
5826 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5830 /* Send IDC ack by writing to drv-ack register */
5831 __qla83xx_set_drv_ack(vha);
/* Write the IDC control register (caller holds the IDC lock). */
5837 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
5839 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
/* Read the IDC control register (caller holds the IDC lock). */
5843 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
5845 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
/*
 * qla83xx_check_driver_presence - test this function's bit in the IDC
 * drv-presence register. Returns QLA_TEST_FAILED when the function has
 * been removed from IDC participation.
 */
5849 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
5851 uint32_t drv_presence = 0;
5852 struct qla_hw_data *ha = vha->hw;
5854 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
5855 if (drv_presence & (1 << ha->portnum))
5858 return QLA_TEST_FAILED;
/*
 * qla83xx_nic_core_reset - orchestrate an ISP83xx NIC-core reset.
 *
 * Bails out if the device is in the unrecoverable FAILED state. Under
 * the IDC lock: verifies IDC participation, determines reset ownership,
 * initiates the reset, and (on success) runs the IDC state handler and
 * clears the nic_core_hung flag and this function's drv-ack bit.
 */
5862 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
5864 int rval = QLA_SUCCESS;
5865 struct qla_hw_data *ha = vha->hw;
5867 ql_dbg(ql_dbg_p3p, vha, 0xb058,
5868 "Entered %s().\n", __func__);
5870 if (vha->device_flags & DFLG_DEV_FAILED) {
5871 ql_log(ql_log_warn, vha, 0xb059,
5872 "Device in unrecoverable FAILED state.\n");
5873 return QLA_FUNCTION_FAILED;
5876 qla83xx_idc_lock(vha, 0);
5878 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
5879 ql_log(ql_log_warn, vha, 0xb05a,
5880 "Function=0x%x has been removed from IDC participation.\n",
5882 rval = QLA_FUNCTION_FAILED;
5886 qla83xx_reset_ownership(vha);
5888 rval = qla83xx_initiating_reset(vha);
5891 * Perform reset if we are the reset-owner,
5892 * else wait till IDC state changes to READY/FAILED.
5894 if (rval == QLA_SUCCESS) {
5895 rval = qla83xx_idc_state_handler(vha);
5897 if (rval == QLA_SUCCESS)
5898 ha->flags.nic_core_hung = 0;
5899 __qla83xx_clear_drv_ack(vha);
5903 qla83xx_idc_unlock(vha, 0);
5905 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
/*
 * qla2xxx_mctp_dump - capture an MCTP firmware dump into a DMA buffer.
 *
 * Lazily allocates the coherent MCTP dump buffer, requests the dump
 * from firmware, and - on capture failure, when this is port 0 and no
 * NIC-core reset handler is active - attempts a NIC firmware restart
 * as recovery. Returns QLA_FUNCTION_FAILED for non-MCTP-capable boards.
 * NOTE(review): early-return lines after the capability/allocation
 * failures are elided in this view.
 */
5911 qla2xxx_mctp_dump(scsi_qla_host_t *vha)
5913 struct qla_hw_data *ha = vha->hw;
5914 int rval = QLA_FUNCTION_FAILED;
5916 if (!IS_MCTP_CAPABLE(ha)) {
5917 /* This message can be removed from the final version */
5918 ql_log(ql_log_info, vha, 0x506d,
5919 "This board is not MCTP capable\n");
/* Allocate the dump buffer once and keep it for the adapter's life. */
5923 if (!ha->mctp_dump) {
5924 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
5925 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
5927 if (!ha->mctp_dump) {
5928 ql_log(ql_log_warn, vha, 0x506e,
5929 "Failed to allocate memory for mctp dump\n");
5934 #define MCTP_DUMP_STR_ADDR 0x00000000
/* Dump length is in 32-bit words, hence the /4. */
5935 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
5936 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
5937 if (rval != QLA_SUCCESS) {
5938 ql_log(ql_log_warn, vha, 0x506f,
5939 "Failed to capture mctp dump\n");
5941 ql_log(ql_log_info, vha, 0x5070,
5942 "Mctp dump capture for host (%ld/%p).\n",
5943 vha->host_no, ha->mctp_dump);
5944 ha->mctp_dumped = 1;
/* Recovery: only port 0 restarts NIC firmware, and only if no other
 * NIC-core reset is already in progress. */
5947 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
5948 ha->flags.nic_core_reset_hdlr_active = 1;
5949 rval = qla83xx_restart_nic_firmware(vha);
5951 /* NIC Core reset failed. */
5952 ql_log(ql_log_warn, vha, 0x5071,
5953 "Failed to restart nic firmware\n");
5955 ql_dbg(ql_dbg_p3p, vha, 0xb084,
5956 "Restarted NIC firmware successfully.\n");
5957 ha->flags.nic_core_reset_hdlr_active = 0;
5965 * qla2x00_quiesce_io
5966 * Description: This function will block the new I/Os
5967 * Its not aborting any I/Os as context
5968 * is not destroyed during quiescence
5969 * Arguments: scsi_qla_host_t
/*
 * qla2x00_quiesce_io - block new I/O without destroying context.
 *
 * Arms the loop-down timer, drives the loop state to LOOP_DOWN, marks
 * all devices lost on the base host and every vport, then waits for
 * outstanding commands to drain. In-flight I/O is NOT aborted.
 */
5973 qla2x00_quiesce_io(scsi_qla_host_t *vha)
5975 struct qla_hw_data *ha = vha->hw;
5976 struct scsi_qla_host *vp;
5978 ql_dbg(ql_dbg_dpc, vha, 0x401d,
5979 "Quiescing I/O - ha=%p.\n", ha);
5981 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
5982 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
5983 atomic_set(&vha->loop_state, LOOP_DOWN);
5984 qla2x00_mark_all_devices_lost(vha, 0);
5985 list_for_each_entry(vp, &ha->vp_list, list)
5986 qla2x00_mark_all_devices_lost(vp, 0);
5988 if (!atomic_read(&vha->loop_down_timer))
5989 atomic_set(&vha->loop_down_timer,
5992 /* Wait for pending cmds to complete */
5993 qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
/*
 * qla2x00_abort_isp_cleanup - quiesce driver state before an ISP abort.
 *
 * Marks the adapter offline (except P3P/82xx, which must stay online to
 * drain commands), resets the chip (again, except P3P where reset_chip
 * only disables interrupts), bumps the chip_reset generation on every
 * queue pair, drives the loop down on all vports, clears async login
 * flags on every fcport, and finally aborts all outstanding commands
 * with DID_RESET.
 */
5997 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
5999 struct qla_hw_data *ha = vha->hw;
6000 struct scsi_qla_host *vp;
6001 unsigned long flags;
6005 /* For ISP82XX, driver waits for completion of the commands.
6006 * online flag should be set.
6008 if (!(IS_P3P_TYPE(ha)))
6009 vha->flags.online = 0;
6010 ha->flags.chip_reset_done = 0;
6011 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6012 vha->qla_stats.total_isp_aborts++;
6014 ql_log(ql_log_info, vha, 0x00af,
6015 "Performing ISP error recovery - ha=%p.\n", ha);
6017 /* For ISP82XX, reset_chip is just disabling interrupts.
6018 * Driver waits for the completion of the commands.
6019 * the interrupts need to be enabled.
6021 if (!(IS_P3P_TYPE(ha)))
6022 ha->isp_ops->reset_chip(vha);
6024 ha->flags.n2n_ae = 0;
6025 ha->flags.lip_ae = 0;
6026 ha->current_topology = 0;
6027 ha->flags.fw_started = 0;
6028 ha->flags.fw_init_done = 0;
/* Bump the reset generation so stale IOCBs can be detected per-qpair. */
6029 ha->base_qpair->chip_reset++;
6030 for (i = 0; i < ha->max_qpairs; i++) {
6031 if (ha->queue_pair_map[i])
6032 ha->queue_pair_map[i]->chip_reset =
6033 ha->base_qpair->chip_reset;
6036 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
6037 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6038 atomic_set(&vha->loop_state, LOOP_DOWN);
6039 qla2x00_mark_all_devices_lost(vha, 0);
6041 spin_lock_irqsave(&ha->vport_slock, flags);
6042 list_for_each_entry(vp, &ha->vp_list, list) {
/* Pin the vport, then drop the lock for the potentially long call. */
6043 atomic_inc(&vp->vref_count);
6044 spin_unlock_irqrestore(&ha->vport_slock, flags);
6046 qla2x00_mark_all_devices_lost(vp, 0);
6048 spin_lock_irqsave(&ha->vport_slock, flags);
6049 atomic_dec(&vp->vref_count);
6051 spin_unlock_irqrestore(&ha->vport_slock, flags);
6053 if (!atomic_read(&vha->loop_down_timer))
6054 atomic_set(&vha->loop_down_timer,
6058 /* Clear all async request states across all VPs. */
6059 list_for_each_entry(fcport, &vha->vp_fcports, list)
6060 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6061 spin_lock_irqsave(&ha->vport_slock, flags);
6062 list_for_each_entry(vp, &ha->vp_list, list) {
6063 atomic_inc(&vp->vref_count);
6064 spin_unlock_irqrestore(&ha->vport_slock, flags);
6066 list_for_each_entry(fcport, &vp->vp_fcports, list)
6067 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
6069 spin_lock_irqsave(&ha->vport_slock, flags);
6070 atomic_dec(&vp->vref_count);
6072 spin_unlock_irqrestore(&ha->vport_slock, flags);
6074 if (!ha->flags.eeh_busy) {
6075 /* Make sure for ISP 82XX IO DMA is complete */
6076 if (IS_P3P_TYPE(ha)) {
6077 qla82xx_chip_reset_cleanup(vha);
6078 ql_log(ql_log_info, vha, 0x00b4,
6079 "Done chip reset cleanup.\n");
6081 /* Done waiting for pending commands.
6082 * Reset the online flag.
6084 vha->flags.online = 0;
6087 /* Requeue all commands in outstanding command list. */
6088 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
6090 /* memory barrier */
6096 * Resets ISP and aborts all outstanding commands.
6099 * ha = adapter block pointer.
/*
 * qla2x00_abort_isp - reset the ISP and recover the adapter.
 *
 * Runs abort cleanup, reloads NVRAM config, and restarts the ISP. On
 * success: re-enables interrupts, re-reads FW version (81xx/8031),
 * re-arms FCE/EFT tracing, reconfigures the HBA, and propagates the
 * abort to every vport. On failure: retries up to
 * MAX_RETRIES_OF_ISP_ABORT times, then disables the board.
 * ISP8031 additionally clears/restores its FCoE drv-presence bit
 * around the recovery.
 */
6105 qla2x00_abort_isp(scsi_qla_host_t *vha)
6109 struct qla_hw_data *ha = vha->hw;
6110 struct scsi_qla_host *vp;
6111 struct req_que *req = ha->req_q_map[0];
6112 unsigned long flags;
6114 if (vha->flags.online) {
6115 qla2x00_abort_isp_cleanup(vha);
6117 if (IS_QLA8031(ha)) {
6118 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
6119 "Clearing fcoe driver presence.\n");
6120 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
6121 ql_dbg(ql_dbg_p3p, vha, 0xb073,
6122 "Error while clearing DRV-Presence.\n");
/* Permanent PCI failure: nothing more we can do here. */
6125 if (unlikely(pci_channel_offline(ha->pdev) &&
6126 ha->flags.pci_channel_io_perm_failure)) {
6127 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6132 ha->isp_ops->get_flash_version(vha, req->ring);
6134 ha->isp_ops->nvram_config(vha);
6136 if (!qla2x00_restart_isp(vha)) {
6137 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6139 if (!atomic_read(&vha->loop_down_timer)) {
6141 * Issue marker command only when we are going
6142 * to start the I/O .
6144 vha->marker_needed = 1;
6147 vha->flags.online = 1;
6149 ha->isp_ops->enable_intrs(ha);
6151 ha->isp_abort_cnt = 0;
6152 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6154 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
6155 qla2x00_get_fw_version(vha);
/* Re-arm Fibre Channel Event (FCE) tracing after the reset. */
6157 ha->flags.fce_enabled = 1;
6159 fce_calc_size(ha->fce_bufs));
6160 rval = qla2x00_enable_fce_trace(vha,
6161 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
6164 ql_log(ql_log_warn, vha, 0x8033,
6165 "Unable to reinitialize FCE "
6167 ha->flags.fce_enabled = 0;
/* Re-arm Extended Firmware Trace (EFT) after the reset. */
6172 memset(ha->eft, 0, EFT_SIZE);
6173 rval = qla2x00_enable_eft_trace(vha,
6174 ha->eft_dma, EFT_NUM_BUFFERS);
6176 ql_log(ql_log_warn, vha, 0x8034,
6177 "Unable to reinitialize EFT "
6181 } else { /* failed the ISP abort */
6182 vha->flags.online = 1;
6183 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
6184 if (ha->isp_abort_cnt == 0) {
6185 ql_log(ql_log_fatal, vha, 0x8035,
6186 "ISP error recover failed - "
6187 "board disabled.\n");
6189 * The next call disables the board
6192 ha->isp_ops->reset_adapter(vha);
6193 vha->flags.online = 0;
6194 clear_bit(ISP_ABORT_RETRY,
6197 } else { /* schedule another ISP abort */
6198 ha->isp_abort_cnt--;
6199 ql_dbg(ql_dbg_taskm, vha, 0x8020,
6200 "ISP abort - retry remaining %d.\n",
/* First failure: start the retry budget. */
6205 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
6206 ql_dbg(ql_dbg_taskm, vha, 0x8021,
6207 "ISP error recovery - retrying (%d) "
6208 "more times.\n", ha->isp_abort_cnt);
6209 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6217 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
6218 qla2x00_configure_hba(vha);
/* Propagate the abort to every vport, pinned via vref_count. */
6219 spin_lock_irqsave(&ha->vport_slock, flags);
6220 list_for_each_entry(vp, &ha->vp_list, list) {
6222 atomic_inc(&vp->vref_count);
6223 spin_unlock_irqrestore(&ha->vport_slock, flags);
6225 qla2x00_vp_abort_isp(vp);
6227 spin_lock_irqsave(&ha->vport_slock, flags);
6228 atomic_dec(&vp->vref_count);
6231 spin_unlock_irqrestore(&ha->vport_slock, flags);
6233 if (IS_QLA8031(ha)) {
6234 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
6235 "Setting back fcoe driver presence.\n");
6236 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
6237 ql_dbg(ql_dbg_p3p, vha, 0xb074,
6238 "Error while setting DRV-Presence.\n");
6241 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
6249 * qla2x00_restart_isp
6250 * restarts the ISP after a reset
6253 * ha = adapter block pointer.
/*
 * qla2x00_restart_isp - bring the ISP back up after a reset.
 *
 * If firmware must be (re)loaded: runs chip diagnostics and setup,
 * initializes rings and auxiliary queues, waits for firmware ready,
 * issues a sync marker, and schedules a loop resync. "No cable" is
 * treated as success since there is nothing to log in to.
 */
6259 qla2x00_restart_isp(scsi_qla_host_t *vha)
6262 struct qla_hw_data *ha = vha->hw;
6263 struct req_que *req = ha->req_q_map[0];
6264 struct rsp_que *rsp = ha->rsp_q_map[0];
6266 /* If firmware needs to be loaded */
6267 if (qla2x00_isp_firmware(vha)) {
6268 vha->flags.online = 0;
6269 status = ha->isp_ops->chip_diag(vha);
6271 status = qla2x00_setup_chip(vha);
6274 if (!status && !(status = qla2x00_init_rings(vha))) {
6275 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6276 ha->flags.chip_reset_done = 1;
6278 /* Initialize the queues in use */
6279 qla25xx_init_queues(ha);
6281 status = qla2x00_fw_ready(vha);
6283 /* Issue a marker after FW becomes ready. */
6284 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
6285 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6288 /* if no cable then assume it's good */
6289 if ((vha->device_flags & DFLG_NO_CABLE))
/*
 * qla25xx_init_queues - (re)initialize all auxiliary request/response
 * queues after a chip reset.
 *
 * Iterates queues 1..max (queue 0 is the base queue, handled
 * elsewhere), clearing BIT_0 of the options and re-issuing the init
 * mailbox command for each mapped queue; failures are logged but do
 * not stop the loop.
 */
6296 qla25xx_init_queues(struct qla_hw_data *ha)
6298 struct rsp_que *rsp = NULL;
6299 struct req_que *req = NULL;
6300 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6304 for (i = 1; i < ha->max_rsp_queues; i++) {
6305 rsp = ha->rsp_q_map[i];
6306 if (rsp && test_bit(i, ha->rsp_qid_map)) {
6307 rsp->options &= ~BIT_0;
6308 ret = qla25xx_init_rsp_que(base_vha, rsp);
6309 if (ret != QLA_SUCCESS)
6310 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
6311 "%s Rsp que: %d init failed.\n",
6314 ql_dbg(ql_dbg_init, base_vha, 0x0100,
6315 "%s Rsp que: %d inited.\n",
6319 for (i = 1; i < ha->max_req_queues; i++) {
6320 req = ha->req_q_map[i];
6321 if (req && test_bit(i, ha->req_qid_map)) {
6322 /* Clear outstanding commands array. */
6323 req->options &= ~BIT_0;
6324 ret = qla25xx_init_req_que(base_vha, req);
6325 if (ret != QLA_SUCCESS)
6326 ql_dbg(ql_dbg_init, base_vha, 0x0101,
6327 "%s Req que: %d init failed.\n",
6330 ql_dbg(ql_dbg_init, base_vha, 0x0102,
6331 "%s Req que: %d inited.\n",
6339 * qla2x00_reset_adapter
6343 * ha = adapter block pointer.
/*
 * qla2x00_reset_adapter - hard-reset the RISC on ISP2x00-class parts.
 *
 * Marks the host offline, disables interrupts, then pulses
 * HCCR_RESET_RISC / HCCR_RELEASE_RISC under the hardware lock. Each
 * write is followed by a read to force PCI posting.
 */
6346 qla2x00_reset_adapter(scsi_qla_host_t *vha)
6348 unsigned long flags = 0;
6349 struct qla_hw_data *ha = vha->hw;
6350 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
6352 vha->flags.online = 0;
6353 ha->isp_ops->disable_intrs(ha);
6355 spin_lock_irqsave(&ha->hardware_lock, flags);
6356 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
6357 RD_REG_WORD(®->hccr); /* PCI Posting. */
6358 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
6359 RD_REG_WORD(®->hccr); /* PCI Posting. */
6360 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla24xx_reset_adapter - hard-reset the RISC on ISP24xx-class parts.
 *
 * No-op on P3P (82xx) hardware. Otherwise: offline the host, disable
 * interrupts, pulse SET_RISC_RESET / REL_RISC_PAUSE under the hardware
 * lock (reads force PCI posting), and re-enable interrupts on
 * non-polling chips.
 */
6364 qla24xx_reset_adapter(scsi_qla_host_t *vha)
6366 unsigned long flags = 0;
6367 struct qla_hw_data *ha = vha->hw;
6368 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
6370 if (IS_P3P_TYPE(ha))
6373 vha->flags.online = 0;
6374 ha->isp_ops->disable_intrs(ha);
6376 spin_lock_irqsave(&ha->hardware_lock, flags);
6377 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET);
6378 RD_REG_DWORD(®->hccr);
6379 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE);
6380 RD_REG_DWORD(®->hccr);
6381 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6383 if (IS_NOPOLLING_TYPE(ha))
6384 ha->isp_ops->enable_intrs(ha);
6387 /* On sparc systems, obtain port and node WWN from firmware
/*
 * qla24xx_nvram_wwn_from_ofw - on SPARC/OpenFirmware systems, override
 * the NVRAM port/node WWNs with the "port-wwn"/"node-wwn" properties
 * from the PCI device's OF node, when present and large enough.
 */
6390 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
6391 struct nvram_24xx *nv)
6394 struct qla_hw_data *ha = vha->hw;
6395 struct pci_dev *pdev = ha->pdev;
6396 struct device_node *dp = pci_device_to_OF_node(pdev);
6400 val = of_get_property(dp, "port-wwn", &len);
6401 if (val && len >= WWN_SIZE)
6402 memcpy(nv->port_name, val, WWN_SIZE);
6404 val = of_get_property(dp, "node-wwn", &len);
6405 if (val && len >= WWN_SIZE)
6406 memcpy(nv->node_name, val, WWN_SIZE);
6411 qla24xx_nvram_config(scsi_qla_host_t *vha)
6414 struct init_cb_24xx *icb;
6415 struct nvram_24xx *nv;
6417 uint8_t *dptr1, *dptr2;
6420 struct qla_hw_data *ha = vha->hw;
6423 icb = (struct init_cb_24xx *)ha->init_cb;
6426 /* Determine NVRAM starting address. */
6427 if (ha->port_no == 0) {
6428 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
6429 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
6431 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
6432 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
6435 ha->nvram_size = sizeof(struct nvram_24xx);
6436 ha->vpd_size = FA_NVRAM_VPD_SIZE;
6438 /* Get VPD data into cache */
6439 ha->vpd = ha->nvram + VPD_OFFSET;
6440 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
6441 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
6443 /* Get NVRAM data into cache and calculate checksum. */
6444 dptr = (uint32_t *)nv;
6445 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
6447 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
6448 chksum += le32_to_cpu(*dptr);
6450 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
6451 "Contents of NVRAM\n");
6452 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
6453 (uint8_t *)nv, ha->nvram_size);
6455 /* Bad NVRAM data, set defaults parameters. */
6456 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6457 || nv->id[3] != ' ' ||
6458 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
6459 /* Reset NVRAM data. */
6460 ql_log(ql_log_warn, vha, 0x006b,
6461 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
6462 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
6463 ql_log(ql_log_warn, vha, 0x006c,
6464 "Falling back to functioning (yet invalid -- WWPN) "
6468 * Set default initialization control block.
6470 memset(nv, 0, ha->nvram_size);
6471 nv->nvram_version = cpu_to_le16(ICB_VERSION);
6472 nv->version = cpu_to_le16(ICB_VERSION);
6473 nv->frame_payload_size = 2048;
6474 nv->execution_throttle = cpu_to_le16(0xFFFF);
6475 nv->exchange_count = cpu_to_le16(0);
6476 nv->hard_address = cpu_to_le16(124);
6477 nv->port_name[0] = 0x21;
6478 nv->port_name[1] = 0x00 + ha->port_no + 1;
6479 nv->port_name[2] = 0x00;
6480 nv->port_name[3] = 0xe0;
6481 nv->port_name[4] = 0x8b;
6482 nv->port_name[5] = 0x1c;
6483 nv->port_name[6] = 0x55;
6484 nv->port_name[7] = 0x86;
6485 nv->node_name[0] = 0x20;
6486 nv->node_name[1] = 0x00;
6487 nv->node_name[2] = 0x00;
6488 nv->node_name[3] = 0xe0;
6489 nv->node_name[4] = 0x8b;
6490 nv->node_name[5] = 0x1c;
6491 nv->node_name[6] = 0x55;
6492 nv->node_name[7] = 0x86;
6493 qla24xx_nvram_wwn_from_ofw(vha, nv);
6494 nv->login_retry_count = cpu_to_le16(8);
6495 nv->interrupt_delay_timer = cpu_to_le16(0);
6496 nv->login_timeout = cpu_to_le16(0);
6497 nv->firmware_options_1 =
6498 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6499 nv->firmware_options_2 = cpu_to_le32(2 << 4);
6500 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6501 nv->firmware_options_3 = cpu_to_le32(2 << 13);
6502 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6503 nv->efi_parameters = cpu_to_le32(0);
6504 nv->reset_delay = 5;
6505 nv->max_luns_per_target = cpu_to_le16(128);
6506 nv->port_down_retry_count = cpu_to_le16(30);
6507 nv->link_down_timeout = cpu_to_le16(30);
6512 if (qla_tgt_mode_enabled(vha)) {
6513 /* Don't enable full login after initial LIP */
6514 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6515 /* Don't enable LIP full login for initiator */
6516 nv->host_p &= cpu_to_le32(~BIT_10);
6519 qlt_24xx_config_nvram_stage1(vha, nv);
6521 /* Reset Initialization control block */
6522 memset(icb, 0, ha->init_cb_size);
6524 /* Copy 1st segment. */
6525 dptr1 = (uint8_t *)icb;
6526 dptr2 = (uint8_t *)&nv->version;
6527 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
6529 *dptr1++ = *dptr2++;
6531 icb->login_retry_count = nv->login_retry_count;
6532 icb->link_down_on_nos = nv->link_down_on_nos;
6534 /* Copy 2nd segment. */
6535 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
6536 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
6537 cnt = (uint8_t *)&icb->reserved_3 -
6538 (uint8_t *)&icb->interrupt_delay_timer;
6540 *dptr1++ = *dptr2++;
6543 * Setup driver NVRAM options.
6545 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
6548 qlt_24xx_config_nvram_stage2(vha, icb);
6550 if (nv->host_p & cpu_to_le32(BIT_15)) {
6551 /* Use alternate WWN? */
6552 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6553 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6556 /* Prepare nodename */
6557 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
6559 * Firmware will apply the following mask if the nodename was
6562 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
6563 icb->node_name[0] &= 0xF0;
6566 /* Set host adapter parameters. */
6567 ha->flags.disable_risc_code_load = 0;
6568 ha->flags.enable_lip_reset = 0;
6569 ha->flags.enable_lip_full_login =
6570 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
6571 ha->flags.enable_target_reset =
6572 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
6573 ha->flags.enable_led_scheme = 0;
6574 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
6576 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
6577 (BIT_6 | BIT_5 | BIT_4)) >> 4;
6579 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
6580 sizeof(ha->fw_seriallink_options24));
6582 /* save HBA serial number */
6583 ha->serial0 = icb->port_name[5];
6584 ha->serial1 = icb->port_name[6];
6585 ha->serial2 = icb->port_name[7];
6586 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6587 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
6589 icb->execution_throttle = cpu_to_le16(0xFFFF);
6591 ha->retry_count = le16_to_cpu(nv->login_retry_count);
6593 /* Set minimum login_timeout to 4 seconds. */
6594 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6595 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6596 if (le16_to_cpu(nv->login_timeout) < 4)
6597 nv->login_timeout = cpu_to_le16(4);
6598 ha->login_timeout = le16_to_cpu(nv->login_timeout);
6600 /* Set minimum RATOV to 100 tenths of a second. */
6603 ha->loop_reset_delay = nv->reset_delay;
6605 /* Link Down Timeout = 0:
6607 * When Port Down timer expires we will start returning
6608 * I/O's to OS with "DID_NO_CONNECT".
6610 * Link Down Timeout != 0:
6612 * The driver waits for the link to come up after link down
6613 * before returning I/Os to OS with "DID_NO_CONNECT".
6615 if (le16_to_cpu(nv->link_down_timeout) == 0) {
6616 ha->loop_down_abort_time =
6617 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
6619 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
6620 ha->loop_down_abort_time =
6621 (LOOP_DOWN_TIME - ha->link_down_timeout);
6624 /* Need enough time to try and get the port back. */
6625 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
6626 if (qlport_down_retry)
6627 ha->port_down_retry_count = qlport_down_retry;
6629 /* Set login_retry_count */
6630 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
6631 if (ha->port_down_retry_count ==
6632 le16_to_cpu(nv->port_down_retry_count) &&
6633 ha->port_down_retry_count > 3)
6634 ha->login_retry_count = ha->port_down_retry_count;
6635 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
6636 ha->login_retry_count = ha->port_down_retry_count;
6637 if (ql2xloginretrycount)
6638 ha->login_retry_count = ql2xloginretrycount;
6641 if (!vha->flags.init_done) {
6642 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
6643 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
6644 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6645 le16_to_cpu(icb->interrupt_delay_timer): 2;
6647 icb->firmware_options_2 &= cpu_to_le32(
6648 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
6649 vha->flags.process_response_queue = 0;
6650 if (ha->zio_mode != QLA_ZIO_DISABLED) {
6651 ha->zio_mode = QLA_ZIO_MODE_6;
6653 ql_log(ql_log_info, vha, 0x006f,
6654 "ZIO mode %d enabled; timer delay (%d us).\n",
6655 ha->zio_mode, ha->zio_timer * 100);
6657 icb->firmware_options_2 |= cpu_to_le32(
6658 (uint32_t)ha->zio_mode);
6659 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
6660 vha->flags.process_response_queue = 1;
6664 ql_log(ql_log_warn, vha, 0x0070,
6665 "NVRAM configuration failed.\n");
6670 uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
6672 struct qla27xx_image_status pri_image_status, sec_image_status;
6673 uint8_t valid_pri_image, valid_sec_image;
6675 uint32_t cnt, chksum, size;
6676 struct qla_hw_data *ha = vha->hw;
6678 valid_pri_image = valid_sec_image = 1;
6679 ha->active_image = 0;
6680 size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
6682 if (!ha->flt_region_img_status_pri) {
6683 valid_pri_image = 0;
6684 goto check_sec_image;
6687 qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
6688 ha->flt_region_img_status_pri, size);
6690 if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
6691 ql_dbg(ql_dbg_init, vha, 0x018b,
6692 "Primary image signature (0x%x) not valid\n",
6693 pri_image_status.signature);
6694 valid_pri_image = 0;
6695 goto check_sec_image;
6698 wptr = (uint32_t *)(&pri_image_status);
6701 for (chksum = 0; cnt--; wptr++)
6702 chksum += le32_to_cpu(*wptr);
6705 ql_dbg(ql_dbg_init, vha, 0x018c,
6706 "Checksum validation failed for primary image (0x%x)\n",
6708 valid_pri_image = 0;
6712 if (!ha->flt_region_img_status_sec) {
6713 valid_sec_image = 0;
6714 goto check_valid_image;
6717 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
6718 ha->flt_region_img_status_sec, size);
6720 if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
6721 ql_dbg(ql_dbg_init, vha, 0x018d,
6722 "Secondary image signature(0x%x) not valid\n",
6723 sec_image_status.signature);
6724 valid_sec_image = 0;
6725 goto check_valid_image;
6728 wptr = (uint32_t *)(&sec_image_status);
6730 for (chksum = 0; cnt--; wptr++)
6731 chksum += le32_to_cpu(*wptr);
6733 ql_dbg(ql_dbg_init, vha, 0x018e,
6734 "Checksum validation failed for secondary image (0x%x)\n",
6736 valid_sec_image = 0;
6740 if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
6741 ha->active_image = QLA27XX_PRIMARY_IMAGE;
6742 if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
6743 if (!ha->active_image ||
6744 pri_image_status.generation_number <
6745 sec_image_status.generation_number)
6746 ha->active_image = QLA27XX_SECONDARY_IMAGE;
6749 ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
6750 ha->active_image == 0 ? "default bootld and fw" :
6751 ha->active_image == 1 ? "primary" :
6752 ha->active_image == 2 ? "secondary" :
6755 return ha->active_image;
/*
 * qla24xx_load_risc_flash() - load RISC firmware from on-board flash.
 * @vha:        host adapter context
 * @srisc_addr: out - start address of the first loaded segment
 * (faddr parameter: flash offset of the firmware image; declaration
 * line is missing from this extract)
 *
 * Validates the image header, then streams each firmware segment from
 * flash into the request ring buffer (used here as a bounce buffer),
 * byte-swapping dwords, and DMAs it to the RISC via qla2x00_load_ram().
 * On ISP27xx it afterwards loads the firmware-dump template from flash,
 * falling back to the built-in default template on any failure.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 *
 * NOTE(review): extract is missing interleaved lines (variable
 * declarations, loop-closing braces, and the default_template:/
 * failed_template: labels implied by the gotos below).
 */
6759 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
6762 int rval = QLA_SUCCESS;
6763 int segments, fragment;
6764 uint32_t *dcode, dlen;
6768 struct qla_hw_data *ha = vha->hw;
6769 struct req_que *req = ha->req_q_map[0];
6771 ql_dbg(ql_dbg_init, vha, 0x008b,
6772 "FW: Loading firmware from flash (%x).\n", faddr);
6776 segments = FA_RISC_CODE_SEGMENTS;
/* Reuse the (currently idle) request ring as a DMA bounce buffer. */
6777 dcode = (uint32_t *)req->ring;
/* ISP27xx may boot from the secondary flash image region. */
6780 if (IS_QLA27XX(ha) &&
6781 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
6782 faddr = ha->flt_region_fw_sec;
6784 /* Validate firmware image by checking version. */
6785 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
6786 for (i = 0; i < 4; i++)
6787 dcode[i] = be32_to_cpu(dcode[i]);
/* All-ones or all-zeros version words => blank/erased flash. */
6788 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
6789 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
6790 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
6792 ql_log(ql_log_fatal, vha, 0x008c,
6793 "Unable to verify the integrity of flash firmware "
6795 ql_log(ql_log_fatal, vha, 0x008d,
6796 "Firmware data: %08x %08x %08x %08x.\n",
6797 dcode[0], dcode[1], dcode[2], dcode[3]);
6799 return QLA_FUNCTION_FAILED;
/* Outer loop: one iteration per firmware segment. */
6802 while (segments && rval == QLA_SUCCESS) {
6803 /* Read segment's load information. */
6804 qla24xx_read_flash_data(vha, dcode, faddr, 4);
6806 risc_addr = be32_to_cpu(dcode[2]);
/* Remember the very first segment's load address for the caller. */
6807 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
6808 risc_size = be32_to_cpu(dcode[3]);
/* Inner loop: transfer the segment in fw_transfer_size fragments. */
6811 while (risc_size > 0 && rval == QLA_SUCCESS) {
6812 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
6813 if (dlen > risc_size)
6816 ql_dbg(ql_dbg_init, vha, 0x008e,
6817 "Loading risc segment@ risc addr %x "
6818 "number of dwords 0x%x offset 0x%x.\n",
6819 risc_addr, dlen, faddr);
6821 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
6822 for (i = 0; i < dlen; i++)
6823 dcode[i] = swab32(dcode[i]);
6825 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
6828 ql_log(ql_log_fatal, vha, 0x008f,
6829 "Failed to load segment %d of firmware.\n",
6831 return QLA_FUNCTION_FAILED;
/* fwdump template handling below is ISP27xx-only. */
6844 if (!IS_QLA27XX(ha))
/* Drop any previously-loaded template before loading a fresh one.
 * (vfree(NULL) would be a no-op; the NULL check is redundant but
 * harmless.) */
6847 if (ha->fw_dump_template)
6848 vfree(ha->fw_dump_template);
6849 ha->fw_dump_template = NULL;
6850 ha->fw_dump_template_len = 0;
6852 ql_dbg(ql_dbg_init, vha, 0x0161,
6853 "Loading fwdump template from %x\n", faddr);
6854 qla24xx_read_flash_data(vha, dcode, faddr, 7);
6855 risc_size = be32_to_cpu(dcode[2]);
6856 ql_dbg(ql_dbg_init, vha, 0x0162,
6857 "-> array size %x dwords\n", risc_size);
6858 if (risc_size == 0 || risc_size == ~0)
6859 goto default_template;
6861 dlen = (risc_size - 8) * sizeof(*dcode);
6862 ql_dbg(ql_dbg_init, vha, 0x0163,
6863 "-> template allocating %x bytes...\n", dlen);
6864 ha->fw_dump_template = vmalloc(dlen);
6865 if (!ha->fw_dump_template) {
6866 ql_log(ql_log_warn, vha, 0x0164,
6867 "Failed fwdump template allocate %x bytes.\n", risc_size);
6868 goto default_template;
/* Read the flash-resident template and convert from little-endian. */
6873 dcode = ha->fw_dump_template;
6874 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
6875 for (i = 0; i < risc_size; i++)
6876 dcode[i] = le32_to_cpu(dcode[i]);
6878 if (!qla27xx_fwdt_template_valid(dcode)) {
6879 ql_log(ql_log_warn, vha, 0x0165,
6880 "Failed fwdump template validate\n");
6881 goto default_template;
6884 dlen = qla27xx_fwdt_template_size(dcode);
6885 ql_dbg(ql_dbg_init, vha, 0x0166,
6886 "-> template size %x bytes\n", dlen);
/* Template must not claim to be larger than what we read. */
6887 if (dlen > risc_size * sizeof(*dcode)) {
6888 ql_log(ql_log_warn, vha, 0x0167,
6889 "Failed fwdump template exceeds array by %zx bytes\n",
6890 (size_t)(dlen - risc_size * sizeof(*dcode)));
6891 goto default_template;
6893 ha->fw_dump_template_len = dlen;
/* default_template: fall back to the driver's built-in template. */
6897 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
6898 if (ha->fw_dump_template)
6899 vfree(ha->fw_dump_template);
6900 ha->fw_dump_template = NULL;
6901 ha->fw_dump_template_len = 0;
6903 dlen = qla27xx_fwdt_template_default_size();
6904 ql_dbg(ql_dbg_init, vha, 0x0169,
6905 "-> template allocating %x bytes...\n", dlen);
6906 ha->fw_dump_template = vmalloc(dlen);
6907 if (!ha->fw_dump_template) {
6908 ql_log(ql_log_warn, vha, 0x016a,
6909 "Failed fwdump template allocate %x bytes.\n", risc_size);
6910 goto failed_template;
/* Built-in default template is stored big-endian here. */
6913 dcode = ha->fw_dump_template;
6914 risc_size = dlen / sizeof(*dcode);
6915 memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
6916 for (i = 0; i < risc_size; i++)
6917 dcode[i] = be32_to_cpu(dcode[i]);
6919 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
6920 ql_log(ql_log_warn, vha, 0x016b,
6921 "Failed fwdump template validate\n");
6922 goto failed_template;
6925 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
6926 ql_dbg(ql_dbg_init, vha, 0x016c,
6927 "-> template size %x bytes\n", dlen);
6928 ha->fw_dump_template_len = dlen;
/* failed_template: give up on fwdump templates entirely. */
6932 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
6933 if (ha->fw_dump_template)
6934 vfree(ha->fw_dump_template);
6935 ha->fw_dump_template = NULL;
6936 ha->fw_dump_template_len = 0;
6940 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
/*
 * qla2x00_load_risc() - load RISC firmware via request_firmware (ISP2x00,
 * 16-bit word oriented).
 * @vha:        host adapter context
 * @srisc_addr: out - start address of the first loaded segment
 *
 * Fetches the firmware blob, validates its header (size and version
 * words), then loads each segment through the request ring used as a
 * bounce buffer, swapping 16-bit words before qla2x00_load_ram().
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 *
 * NOTE(review): extract is missing interleaved lines (some variable
 * initialization, loop-advance statements, closing braces and the
 * fail_fw_integrity: label targeted by the gotos).
 */
6943 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
6947 uint16_t *wcode, *fwcode;
6948 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
6949 struct fw_blob *blob;
6950 struct qla_hw_data *ha = vha->hw;
6951 struct req_que *req = ha->req_q_map[0];
6953 /* Load firmware blob. */
6954 blob = qla2x00_request_firmware(vha);
6956 ql_log(ql_log_info, vha, 0x0083,
6957 "Firmware image unavailable.\n");
6958 ql_log(ql_log_info, vha, 0x0084,
6959 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
6960 return QLA_FUNCTION_FAILED;
/* Request ring doubles as the DMA bounce buffer during load. */
6965 wcode = (uint16_t *)req->ring;
6967 fwcode = (uint16_t *)blob->fw->data;
6970 /* Validate firmware image by checking version. */
6971 if (blob->fw->size < 8 * sizeof(uint16_t)) {
6972 ql_log(ql_log_fatal, vha, 0x0085,
6973 "Unable to verify integrity of firmware image (%zd).\n",
6975 goto fail_fw_integrity;
6977 for (i = 0; i < 4; i++)
6978 wcode[i] = be16_to_cpu(fwcode[i + 4]);
/* All-ones or all-zeros version words => corrupt/blank image. */
6979 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
6980 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
6981 wcode[2] == 0 && wcode[3] == 0)) {
6982 ql_log(ql_log_fatal, vha, 0x0086,
6983 "Unable to verify integrity of firmware image.\n");
6984 ql_log(ql_log_fatal, vha, 0x0087,
6985 "Firmware data: %04x %04x %04x %04x.\n",
6986 wcode[0], wcode[1], wcode[2], wcode[3]);
6987 goto fail_fw_integrity;
/* One iteration per segment; *seg == 0 terminates the list. */
6991 while (*seg && rval == QLA_SUCCESS) {
6993 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
6994 risc_size = be16_to_cpu(fwcode[3]);
6996 /* Validate firmware image size. */
6997 fwclen += risc_size * sizeof(uint16_t);
6998 if (blob->fw->size < fwclen) {
6999 ql_log(ql_log_fatal, vha, 0x0088,
7000 "Unable to verify integrity of firmware image "
7001 "(%zd).\n", blob->fw->size);
7002 goto fail_fw_integrity;
/* Transfer the segment in fw_transfer_size (word) fragments. */
7006 while (risc_size > 0 && rval == QLA_SUCCESS) {
7007 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
7008 if (wlen > risc_size)
7010 ql_dbg(ql_dbg_init, vha, 0x0089,
7011 "Loading risc segment@ risc addr %x number of "
7012 "words 0x%x.\n", risc_addr, wlen);
7014 for (i = 0; i < wlen; i++)
7015 wcode[i] = swab16(fwcode[i]);
7017 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
7020 ql_log(ql_log_fatal, vha, 0x008a,
7021 "Failed to load segment %d of firmware.\n",
7038 return QLA_FUNCTION_FAILED;
/*
 * qla24xx_load_risc_blob() - load RISC firmware via request_firmware
 * (ISP24xx+, 32-bit dword oriented).
 * @vha:        host adapter context
 * @srisc_addr: out - start address of the first loaded segment
 *
 * Same structure as qla24xx_load_risc_flash() but sourced from the
 * request_firmware blob instead of flash.  On ISP27xx it then loads
 * the fwdump template carried in the blob, falling back to the
 * built-in default template on any failure.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 *
 * NOTE(review): extract is missing interleaved lines (declarations,
 * fwcode advance statements, closing braces and the default_template:/
 * failed_template: labels implied by the gotos).
 */
7042 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7045 int segments, fragment;
7046 uint32_t *dcode, dlen;
7050 struct fw_blob *blob;
7051 const uint32_t *fwcode;
7053 struct qla_hw_data *ha = vha->hw;
7054 struct req_que *req = ha->req_q_map[0];
7056 /* Load firmware blob. */
7057 blob = qla2x00_request_firmware(vha);
7059 ql_log(ql_log_warn, vha, 0x0090,
7060 "Firmware image unavailable.\n");
7061 ql_log(ql_log_warn, vha, 0x0091,
7062 "Firmware images can be retrieved from: "
7065 return QLA_FUNCTION_FAILED;
7068 ql_dbg(ql_dbg_init, vha, 0x0092,
7069 "FW: Loading via request-firmware.\n");
7073 segments = FA_RISC_CODE_SEGMENTS;
/* Request ring serves as the DMA bounce buffer during load. */
7074 dcode = (uint32_t *)req->ring;
7076 fwcode = (uint32_t *)blob->fw->data;
7079 /* Validate firmware image by checking version. */
7080 if (blob->fw->size < 8 * sizeof(uint32_t)) {
7081 ql_log(ql_log_fatal, vha, 0x0093,
7082 "Unable to verify integrity of firmware image (%zd).\n",
7084 return QLA_FUNCTION_FAILED;
7086 for (i = 0; i < 4; i++)
7087 dcode[i] = be32_to_cpu(fwcode[i + 4]);
/* All-ones or all-zeros version dwords => corrupt/blank image. */
7088 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
7089 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
7090 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
7092 ql_log(ql_log_fatal, vha, 0x0094,
7093 "Unable to verify integrity of firmware image (%zd).\n",
7095 ql_log(ql_log_fatal, vha, 0x0095,
7096 "Firmware data: %08x %08x %08x %08x.\n",
7097 dcode[0], dcode[1], dcode[2], dcode[3]);
7098 return QLA_FUNCTION_FAILED;
/* One iteration per firmware segment. */
7101 while (segments && rval == QLA_SUCCESS) {
7102 risc_addr = be32_to_cpu(fwcode[2]);
7103 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
7104 risc_size = be32_to_cpu(fwcode[3]);
7106 /* Validate firmware image size. */
7107 fwclen += risc_size * sizeof(uint32_t);
7108 if (blob->fw->size < fwclen) {
7109 ql_log(ql_log_fatal, vha, 0x0096,
7110 "Unable to verify integrity of firmware image "
7111 "(%zd).\n", blob->fw->size);
7112 return QLA_FUNCTION_FAILED;
/* Transfer the segment in fw_transfer_size (dword) fragments. */
7116 while (risc_size > 0 && rval == QLA_SUCCESS) {
7117 dlen = (uint32_t)(ha->fw_transfer_size >> 2);
7118 if (dlen > risc_size)
7121 ql_dbg(ql_dbg_init, vha, 0x0097,
7122 "Loading risc segment@ risc addr %x "
7123 "number of dwords 0x%x.\n", risc_addr, dlen);
7125 for (i = 0; i < dlen; i++)
7126 dcode[i] = swab32(fwcode[i]);
7128 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
7131 ql_log(ql_log_fatal, vha, 0x0098,
7132 "Failed to load segment %d of firmware.\n",
7134 return QLA_FUNCTION_FAILED;
/* fwdump template handling below is ISP27xx-only. */
7147 if (!IS_QLA27XX(ha))
/* Drop any previously-loaded template (NULL check before vfree is
 * redundant but harmless). */
7150 if (ha->fw_dump_template)
7151 vfree(ha->fw_dump_template);
7152 ha->fw_dump_template = NULL;
7153 ha->fw_dump_template_len = 0;
7155 ql_dbg(ql_dbg_init, vha, 0x171,
7156 "Loading fwdump template from %x\n",
7157 (uint32_t)((void *)fwcode - (void *)blob->fw->data));
7158 risc_size = be32_to_cpu(fwcode[2]);
7159 ql_dbg(ql_dbg_init, vha, 0x172,
7160 "-> array size %x dwords\n", risc_size);
7161 if (risc_size == 0 || risc_size == ~0)
7162 goto default_template;
7164 dlen = (risc_size - 8) * sizeof(*fwcode);
7165 ql_dbg(ql_dbg_init, vha, 0x0173,
7166 "-> template allocating %x bytes...\n", dlen);
7167 ha->fw_dump_template = vmalloc(dlen);
7168 if (!ha->fw_dump_template) {
7169 ql_log(ql_log_warn, vha, 0x0174,
7170 "Failed fwdump template allocate %x bytes.\n", risc_size);
7171 goto default_template;
/* Blob-resident template is little-endian. */
7176 dcode = ha->fw_dump_template;
7177 for (i = 0; i < risc_size; i++)
7178 dcode[i] = le32_to_cpu(fwcode[i]);
7180 if (!qla27xx_fwdt_template_valid(dcode)) {
7181 ql_log(ql_log_warn, vha, 0x0175,
7182 "Failed fwdump template validate\n");
7183 goto default_template;
7186 dlen = qla27xx_fwdt_template_size(dcode);
7187 ql_dbg(ql_dbg_init, vha, 0x0176,
7188 "-> template size %x bytes\n", dlen);
/* Template must not claim to be larger than what the blob holds. */
7189 if (dlen > risc_size * sizeof(*fwcode)) {
7190 ql_log(ql_log_warn, vha, 0x0177,
7191 "Failed fwdump template exceeds array by %zx bytes\n",
7192 (size_t)(dlen - risc_size * sizeof(*fwcode)));
7193 goto default_template;
7195 ha->fw_dump_template_len = dlen;
/* default_template: fall back to the driver's built-in template. */
7199 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
7200 if (ha->fw_dump_template)
7201 vfree(ha->fw_dump_template);
7202 ha->fw_dump_template = NULL;
7203 ha->fw_dump_template_len = 0;
7205 dlen = qla27xx_fwdt_template_default_size();
7206 ql_dbg(ql_dbg_init, vha, 0x0179,
7207 "-> template allocating %x bytes...\n", dlen);
7208 ha->fw_dump_template = vmalloc(dlen);
7209 if (!ha->fw_dump_template) {
7210 ql_log(ql_log_warn, vha, 0x017a,
7211 "Failed fwdump template allocate %x bytes.\n", risc_size);
7212 goto failed_template;
/* Built-in default template is stored big-endian. */
7215 dcode = ha->fw_dump_template;
7216 risc_size = dlen / sizeof(*fwcode);
7217 fwcode = qla27xx_fwdt_template_default();
7218 for (i = 0; i < risc_size; i++)
7219 dcode[i] = be32_to_cpu(fwcode[i]);
7221 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
7222 ql_log(ql_log_warn, vha, 0x017b,
7223 "Failed fwdump template validate\n");
7224 goto failed_template;
7227 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
7228 ql_dbg(ql_dbg_init, vha, 0x017c,
7229 "-> template size %x bytes\n", dlen);
7230 ha->fw_dump_template_len = dlen;
/* failed_template: give up on fwdump templates entirely. */
7234 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
7235 if (ha->fw_dump_template)
7236 vfree(ha->fw_dump_template);
7237 ha->fw_dump_template = NULL;
7238 ha->fw_dump_template_len = 0;
/*
 * qla24xx_load_risc() - firmware-load policy for ISP24xx.
 * @vha:        host adapter context
 * @srisc_addr: out - start address of the first loaded segment
 *
 * Honors the ql2xfwloadbin module parameter (1 = defer to the
 * qla81xx flash-first policy), otherwise tries request-firmware
 * first and falls back to flash.
 */
7243 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7247 if (ql2xfwloadbin == 1)
7248 return qla81xx_load_risc(vha, srisc_addr);
7252 * 1) Firmware via request-firmware interface (.bin file).
7253 * 2) Firmware residing in flash.
7255 rval = qla24xx_load_risc_blob(vha, srisc_addr);
7256 if (rval == QLA_SUCCESS)
7259 return qla24xx_load_risc_flash(vha, srisc_addr,
7260 vha->hw->flt_region_fw);
/*
 * qla81xx_load_risc() - firmware-load policy for ISP81xx.
 * @vha:        host adapter context
 * @srisc_addr: out - start address of the first loaded segment
 *
 * Honors ql2xfwloadbin (2 = blob-first policy, code not visible in
 * this extract).  Default order: flash, then request-firmware blob,
 * then the golden (limited-operation) flash image as a last resort;
 * running_gold_fw is flagged so the rest of the driver knows.
 */
7264 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7267 struct qla_hw_data *ha = vha->hw;
7269 if (ql2xfwloadbin == 2)
7274 * 1) Firmware residing in flash.
7275 * 2) Firmware via request-firmware interface (.bin file).
7276 * 3) Golden-Firmware residing in flash -- limited operation.
7278 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
7279 if (rval == QLA_SUCCESS)
7283 rval = qla24xx_load_risc_blob(vha, srisc_addr);
/* No golden image region configured => nothing left to try. */
7284 if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
7287 ql_log(ql_log_info, vha, 0x0099,
7288 "Attempting to fallback to golden firmware.\n");
7289 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
7290 if (rval != QLA_SUCCESS)
7293 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
7294 ha->flags.running_gold_fw = 1;
/*
 * qla2x00_try_to_stop_firmware() - best-effort firmware shutdown.
 * @vha: host adapter context
 *
 * Bails out early when stopping makes no sense (dead PCI channel,
 * pre-FWI2 chips, no firmware version, firmware never started).
 * Otherwise issues a stop-firmware mailbox command and retries up to
 * 5 times, resetting/re-initializing the chip between attempts, until
 * the command succeeds, times out, or is reported invalid.  Clears
 * fw_init_done on the way out.
 *
 * NOTE(review): extract is missing interleaved lines (early returns,
 * loop braces) between the visible statements.
 */
7299 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
7302 struct qla_hw_data *ha = vha->hw;
7304 if (ha->flags.pci_channel_io_perm_failure)
7306 if (!IS_FWI2_CAPABLE(ha))
7308 if (!ha->fw_major_version)
7310 if (!ha->flags.fw_started)
7313 ret = qla2x00_stop_firmware(vha);
/* Retry unless the failure is terminal (timeout/invalid command). */
7314 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
7315 ret != QLA_INVALID_COMMAND && retries ; retries--) {
7316 ha->isp_ops->reset_chip(vha);
7317 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
7319 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
7321 ql_log(ql_log_info, vha, 0x8015,
7322 "Attempting retry of stop-firmware command.\n");
7323 ret = qla2x00_stop_firmware(vha);
7327 ha->flags.fw_init_done = 0;
/*
 * qla24xx_configure_vhba() - bring an NPIV virtual port online.
 * @vha: virtual host context
 *
 * Waits for firmware readiness on the base (physical) port, issues a
 * sync marker, logs the vport into the SNS (fabric name server) and,
 * on success, kicks off a loop resync via the base port.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
7331 qla24xx_configure_vhba(scsi_qla_host_t *vha)
7333 int rval = QLA_SUCCESS;
7335 uint16_t mb[MAILBOX_REGISTER_COUNT];
7336 struct qla_hw_data *ha = vha->hw;
7337 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7338 struct req_que *req;
7339 struct rsp_que *rsp;
/* Firmware readiness is tracked on the physical port. */
7344 rval = qla2x00_fw_ready(base_vha);
/* Prefer the vport's own queue pair when available. */
7346 req = vha->qpair->req;
7348 req = ha->req_q_map[0];
7351 if (rval == QLA_SUCCESS) {
7352 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7353 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
7356 vha->flags.management_server_logged_in = 0;
7358 /* Login to SNS first */
7359 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
7361 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
7362 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
7363 ql_dbg(ql_dbg_init, vha, 0x0120,
7364 "Failed SNS login: loop_id=%x, rval2=%d\n",
7367 ql_dbg(ql_dbg_init, vha, 0x0103,
7368 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
7369 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
7370 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
7371 return (QLA_FUNCTION_FAILED);
/* SNS login succeeded: mark loop up and request a resync. */
7374 atomic_set(&vha->loop_down_timer, 0);
7375 atomic_set(&vha->loop_state, LOOP_UP);
7376 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7377 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
7378 rval = qla2x00_loop_resync(base_vha);
7383 /* 84XX Support **************************************************************/
/* Registry of per-PCI-bus shared 84xx chip state; all list traversal
 * and mutation is serialized by qla_cs84xx_mutex. */
7385 static LIST_HEAD(qla_cs84xx_list);
7386 static DEFINE_MUTEX(qla_cs84xx_mutex);
/*
 * qla84xx_get_chip() - get (or create) the shared 84xx chip state for
 * this adapter's PCI bus.
 * @vha: host adapter context
 *
 * Two functions on the same physical 84xx chip share one
 * qla_chip_state_84xx, refcounted with a kref.  An existing entry is
 * found by matching pdev->bus; otherwise a new one is allocated,
 * initialized and added to the global list.  Lookup and insertion are
 * serialized by qla_cs84xx_mutex.  Returns the (referenced) state, or
 * presumably NULL on allocation failure (the failure branch is not
 * visible in this extract).
 */
7388 static struct qla_chip_state_84xx *
7389 qla84xx_get_chip(struct scsi_qla_host *vha)
7391 struct qla_chip_state_84xx *cs84xx;
7392 struct qla_hw_data *ha = vha->hw;
7394 mutex_lock(&qla_cs84xx_mutex);
7396 /* Find any shared 84xx chip. */
7397 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
7398 if (cs84xx->bus == ha->pdev->bus) {
7399 kref_get(&cs84xx->kref);
/* Not found: create a fresh state object (initial kref = 1). */
7404 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
7408 kref_init(&cs84xx->kref);
7409 spin_lock_init(&cs84xx->access_lock);
7410 mutex_init(&cs84xx->fw_update_mutex);
7411 cs84xx->bus = ha->pdev->bus;
7413 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
7415 mutex_unlock(&qla_cs84xx_mutex);
/*
 * __qla84xx_chip_release() - kref release callback for shared 84xx
 * chip state: unlink it from the global list under qla_cs84xx_mutex.
 * (The kfree of cs84xx is expected after the unlock; that line is not
 * visible in this extract.)
 */
7420 __qla84xx_chip_release(struct kref *kref)
7422 struct qla_chip_state_84xx *cs84xx =
7423 container_of(kref, struct qla_chip_state_84xx, kref);
7425 mutex_lock(&qla_cs84xx_mutex);
7426 list_del(&cs84xx->list);
7427 mutex_unlock(&qla_cs84xx_mutex);
/* qla84xx_put_chip() - drop this adapter's reference on the shared
 * 84xx chip state; the last put triggers __qla84xx_chip_release(). */
7432 qla84xx_put_chip(struct scsi_qla_host *vha)
7434 struct qla_hw_data *ha = vha->hw;
7436 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
/*
 * qla84xx_init_chip() - verify the 84xx chip, serialized against
 * concurrent firmware updates on the shared chip state.
 * Returns QLA_FUNCTION_FAILED if the verify mailbox command failed or
 * reported a bad status[0]; otherwise QLA_SUCCESS (tail of the return
 * expression is not visible in this extract).
 */
7440 qla84xx_init_chip(scsi_qla_host_t *vha)
7444 struct qla_hw_data *ha = vha->hw;
7446 mutex_lock(&ha->cs84xx->fw_update_mutex);
7448 rval = qla84xx_verify_chip(vha, status);
7450 mutex_unlock(&ha->cs84xx->fw_update_mutex);
7452 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
7456 /* 81XX Support **************************************************************/
/*
 * qla81xx_nvram_config() - read, validate and apply NVRAM settings
 * (ISP81xx/83xx/27xx family).
 * @vha: host adapter context
 *
 * Caches VPD and NVRAM from flash, verifies the NVRAM by additive
 * 32-bit checksum / "ISP " id / version; on corruption, synthesizes a
 * safe default configuration (with a deliberately invalid WWPN so the
 * misconfiguration is visible).  The NVRAM contents are then copied
 * into the init control block in two segments and used to derive the
 * driver's operational parameters (timeouts, retry counts, ZIO mode,
 * target-mode options, etc.).
 *
 * NOTE(review): extract is missing interleaved lines (return-code
 * variable, copy loops' for-statements, some braces and else-arms) --
 * comments describe only what is visible.
 */
7459 qla81xx_nvram_config(scsi_qla_host_t *vha)
7462 struct init_cb_81xx *icb;
7463 struct nvram_81xx *nv;
7465 uint8_t *dptr1, *dptr2;
7468 struct qla_hw_data *ha = vha->hw;
7471 icb = (struct init_cb_81xx *)ha->init_cb;
7474 /* Determine NVRAM starting address. */
7475 ha->nvram_size = sizeof(struct nvram_81xx);
7476 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7477 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
7478 ha->vpd_size = FA_VPD_SIZE_82XX;
7480 /* Get VPD data into cache */
7481 ha->vpd = ha->nvram + VPD_OFFSET;
7482 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
7485 /* Get NVRAM data into cache and calculate checksum. */
7486 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
/* Additive checksum over the whole NVRAM; a valid image sums to 0. */
7488 dptr = (uint32_t *)nv;
7489 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7490 chksum += le32_to_cpu(*dptr);
7492 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
7493 "Contents of NVRAM:\n");
7494 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
7495 (uint8_t *)nv, ha->nvram_size);
7497 /* Bad NVRAM data, set defaults parameters. */
7498 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
7499 || nv->id[3] != ' ' ||
7500 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
7501 /* Reset NVRAM data. */
7502 ql_log(ql_log_info, vha, 0x0073,
7503 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
7504 "version=0x%x.\n", chksum, nv->id[0],
7505 le16_to_cpu(nv->nvram_version));
7506 ql_log(ql_log_info, vha, 0x0074,
7507 "Falling back to functioning (yet invalid -- WWPN) "
7511 * Set default initialization control block.
7513 memset(nv, 0, ha->nvram_size);
7514 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7515 nv->version = cpu_to_le16(ICB_VERSION);
7516 nv->frame_payload_size = 2048;
7517 nv->execution_throttle = cpu_to_le16(0xFFFF);
7518 nv->exchange_count = cpu_to_le16(0);
/* Default WWPN/WWNN, varied per function by port_no. */
7519 nv->port_name[0] = 0x21;
7520 nv->port_name[1] = 0x00 + ha->port_no + 1;
7521 nv->port_name[2] = 0x00;
7522 nv->port_name[3] = 0xe0;
7523 nv->port_name[4] = 0x8b;
7524 nv->port_name[5] = 0x1c;
7525 nv->port_name[6] = 0x55;
7526 nv->port_name[7] = 0x86;
7527 nv->node_name[0] = 0x20;
7528 nv->node_name[1] = 0x00;
7529 nv->node_name[2] = 0x00;
7530 nv->node_name[3] = 0xe0;
7531 nv->node_name[4] = 0x8b;
7532 nv->node_name[5] = 0x1c;
7533 nv->node_name[6] = 0x55;
7534 nv->node_name[7] = 0x86;
7535 nv->login_retry_count = cpu_to_le16(8);
7536 nv->interrupt_delay_timer = cpu_to_le16(0);
7537 nv->login_timeout = cpu_to_le16(0);
7538 nv->firmware_options_1 =
7539 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7540 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7541 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7542 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7543 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7544 nv->efi_parameters = cpu_to_le32(0);
7545 nv->reset_delay = 5;
7546 nv->max_luns_per_target = cpu_to_le16(128);
7547 nv->port_down_retry_count = cpu_to_le16(30);
7548 nv->link_down_timeout = cpu_to_le16(180);
/* Default FCoE enode MAC, varied per function by port_no. */
7549 nv->enode_mac[0] = 0x00;
7550 nv->enode_mac[1] = 0xC0;
7551 nv->enode_mac[2] = 0xDD;
7552 nv->enode_mac[3] = 0x04;
7553 nv->enode_mac[4] = 0x05;
7554 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
/* T10-PI needs DIF-friendly (8-byte aligned) frame payloads. */
7559 if (IS_T10_PI_CAPABLE(ha))
7560 nv->frame_payload_size &= ~7;
7562 qlt_81xx_config_nvram_stage1(vha, nv);
7564 /* Reset Initialization control block */
7565 memset(icb, 0, ha->init_cb_size);
7567 /* Copy 1st segment. */
7568 dptr1 = (uint8_t *)icb;
7569 dptr2 = (uint8_t *)&nv->version;
7570 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7572 *dptr1++ = *dptr2++;
7574 icb->login_retry_count = nv->login_retry_count;
7576 /* Copy 2nd segment. */
7577 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7578 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7579 cnt = (uint8_t *)&icb->reserved_5 -
7580 (uint8_t *)&icb->interrupt_delay_timer;
7582 *dptr1++ = *dptr2++;
7584 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
7585 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
7586 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
7587 icb->enode_mac[0] = 0x00;
7588 icb->enode_mac[1] = 0xC0;
7589 icb->enode_mac[2] = 0xDD;
7590 icb->enode_mac[3] = 0x04;
7591 icb->enode_mac[4] = 0x05;
7592 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
7595 /* Use extended-initialization control block. */
7596 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
7599 * Setup driver NVRAM options.
7601 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7604 qlt_81xx_config_nvram_stage2(vha, icb);
7606 /* Use alternate WWN? */
7607 if (nv->host_p & cpu_to_le32(BIT_15)) {
7608 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7609 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7612 /* Prepare nodename */
7613 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7615 * Firmware will apply the following mask if the nodename was
7618 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7619 icb->node_name[0] &= 0xF0;
7622 /* Set host adapter parameters. */
7623 ha->flags.disable_risc_code_load = 0;
7624 ha->flags.enable_lip_reset = 0;
7625 ha->flags.enable_lip_full_login =
7626 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
7627 ha->flags.enable_target_reset =
7628 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
7629 ha->flags.enable_led_scheme = 0;
7630 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
7632 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7633 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7635 /* save HBA serial number */
7636 ha->serial0 = icb->port_name[5];
7637 ha->serial1 = icb->port_name[6];
7638 ha->serial2 = icb->port_name[7];
7639 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7640 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7642 icb->execution_throttle = cpu_to_le16(0xFFFF);
7644 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7646 /* Set minimum login_timeout to 4 seconds. */
7647 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7648 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7649 if (le16_to_cpu(nv->login_timeout) < 4)
7650 nv->login_timeout = cpu_to_le16(4);
7651 ha->login_timeout = le16_to_cpu(nv->login_timeout);
7653 /* Set minimum RATOV to 100 tenths of a second. */
7656 ha->loop_reset_delay = nv->reset_delay;
7658 /* Link Down Timeout = 0:
7660 * When Port Down timer expires we will start returning
7661 * I/O's to OS with "DID_NO_CONNECT".
7663 * Link Down Timeout != 0:
7665 * The driver waits for the link to come up after link down
7666 * before returning I/Os to OS with "DID_NO_CONNECT".
7668 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7669 ha->loop_down_abort_time =
7670 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7672 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7673 ha->loop_down_abort_time =
7674 (LOOP_DOWN_TIME - ha->link_down_timeout);
7677 /* Need enough time to try and get the port back. */
7678 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7679 if (qlport_down_retry)
7680 ha->port_down_retry_count = qlport_down_retry;
7682 /* Set login_retry_count */
7683 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7684 if (ha->port_down_retry_count ==
7685 le16_to_cpu(nv->port_down_retry_count) &&
7686 ha->port_down_retry_count > 3)
7687 ha->login_retry_count = ha->port_down_retry_count;
7688 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7689 ha->login_retry_count = ha->port_down_retry_count;
7690 if (ql2xloginretrycount)
7691 ha->login_retry_count = ql2xloginretrycount;
7693 /* if not running MSI-X we need handshaking on interrupts */
7694 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
7695 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
/* ZIO (interrupt coalescing) setup -- first-time init only. */
7698 if (!vha->flags.init_done) {
7699 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7700 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7701 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7702 le16_to_cpu(icb->interrupt_delay_timer): 2;
7704 icb->firmware_options_2 &= cpu_to_le32(
7705 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7706 vha->flags.process_response_queue = 0;
7707 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7708 ha->zio_mode = QLA_ZIO_MODE_6;
7710 ql_log(ql_log_info, vha, 0x0075,
7711 "ZIO mode %d enabled; timer delay (%d us).\n",
7713 ha->zio_timer * 100);
7715 icb->firmware_options_2 |= cpu_to_le32(
7716 (uint32_t)ha->zio_mode);
7717 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7718 vha->flags.process_response_queue = 1;
7721 /* enable RIDA Format2 */
7722 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7723 icb->firmware_options_3 |= BIT_0;
7725 if (IS_QLA27XX(ha)) {
7726 icb->firmware_options_3 |= BIT_8;
/* NOTE(review): ql_dbg() called with a ql_log_* level and a message
 * id (0x0075) already used above -- looks like it should be
 * ql_dbg(ql_dbg_init, ...) with a unique id; confirm against
 * upstream before changing. */
7727 ql_dbg(ql_log_info, vha, 0x0075,
7728 "Enabling direct connection.\n");
7732 ql_log(ql_log_warn, vha, 0x0076,
7733 "NVRAM configuration failed.\n");
/*
 * qla82xx_restart_isp() - re-initialize an ISP82xx after reset.
 * @vha: host adapter context
 *
 * Re-inits the rings, waits for firmware readiness, issues a sync
 * marker, re-enables interrupts, re-arms the FCE/EFT trace buffers,
 * and finally propagates the abort/restart to every vport.
 *
 * NOTE(review): extract is missing interleaved lines (status checks
 * such as "if (!status)", else-arms and closing braces) -- comments
 * describe only what is visible.
 */
7739 qla82xx_restart_isp(scsi_qla_host_t *vha)
7742 struct qla_hw_data *ha = vha->hw;
7743 struct req_que *req = ha->req_q_map[0];
7744 struct rsp_que *rsp = ha->rsp_q_map[0];
7745 struct scsi_qla_host *vp;
7746 unsigned long flags;
7748 status = qla2x00_init_rings(vha);
7750 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7751 ha->flags.chip_reset_done = 1;
7753 status = qla2x00_fw_ready(vha);
7755 /* Issue a marker after FW becomes ready. */
7756 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
7757 vha->flags.online = 1;
7758 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7761 /* if no cable then assume it's good */
7762 if ((vha->device_flags & DFLG_NO_CABLE))
7767 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7769 if (!atomic_read(&vha->loop_down_timer)) {
7771 * Issue marker command only when we are going
7772 * to start the I/O .
7774 vha->marker_needed = 1;
7777 ha->isp_ops->enable_intrs(ha);
7779 ha->isp_abort_cnt = 0;
7780 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7782 /* Update the firmware version */
7783 status = qla82xx_check_md_needed(vha);
/* Re-arm the Fibre Channel Event trace buffer if it was in use. */
7786 ha->flags.fce_enabled = 1;
7788 fce_calc_size(ha->fce_bufs));
7789 rval = qla2x00_enable_fce_trace(vha,
7790 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
7793 ql_log(ql_log_warn, vha, 0x8001,
7794 "Unable to reinitialize FCE (%d).\n",
7796 ha->flags.fce_enabled = 0;
/* Re-arm the Extended Firmware Trace buffer if it was in use. */
7801 memset(ha->eft, 0, EFT_SIZE);
7802 rval = qla2x00_enable_eft_trace(vha,
7803 ha->eft_dma, EFT_NUM_BUFFERS);
7805 ql_log(ql_log_warn, vha, 0x8010,
7806 "Unable to reinitialize EFT (%d).\n",
7813 ql_dbg(ql_dbg_taskm, vha, 0x8011,
7814 "qla82xx_restart_isp succeeded.\n");
/*
 * Walk the vport list under vport_slock; each vport is pinned with
 * vref_count while the lock is dropped around the (sleeping) abort.
 */
7816 spin_lock_irqsave(&ha->vport_slock, flags);
7817 list_for_each_entry(vp, &ha->vp_list, list) {
7819 atomic_inc(&vp->vref_count);
7820 spin_unlock_irqrestore(&ha->vport_slock, flags);
7822 qla2x00_vp_abort_isp(vp);
7824 spin_lock_irqsave(&ha->vport_slock, flags);
7825 atomic_dec(&vp->vref_count);
7828 spin_unlock_irqrestore(&ha->vport_slock, flags);
7831 ql_log(ql_log_warn, vha, 0x8016,
7832 "qla82xx_restart_isp **** FAILED ****.\n");
7839 qla81xx_update_fw_options(scsi_qla_host_t *vha)
7841 struct qla_hw_data *ha = vha->hw;
7843 /* Hold status IOCBs until ABTS response received. */
7845 ha->fw_options[3] |= BIT_12;
7847 /* Set Retry FLOGI in case of P2P connection */
7848 if (ha->operating_mode == P2P) {
7849 ha->fw_options[2] |= BIT_3;
7850 ql_dbg(ql_dbg_disc, vha, 0x2103,
7851 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
7852 __func__, ha->fw_options[2]);
7855 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
7856 if (ql2xmvasynctoatio) {
7857 if (qla_tgt_mode_enabled(vha) ||
7858 qla_dual_mode_enabled(vha))
7859 ha->fw_options[2] |= BIT_11;
7861 ha->fw_options[2] &= ~BIT_11;
7864 if (qla_tgt_mode_enabled(vha) ||
7865 qla_dual_mode_enabled(vha)) {
7866 /* FW auto send SCSI status during */
7867 ha->fw_options[1] |= BIT_8;
7868 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
7870 /* FW perform Exchange validation */
7871 ha->fw_options[2] |= BIT_4;
7873 ha->fw_options[1] &= ~BIT_8;
7874 ha->fw_options[10] &= 0x00ff;
7876 ha->fw_options[2] &= ~BIT_4;
7879 if (ql2xetsenable) {
7880 /* Enable ETS Burst. */
7881 memset(ha->fw_options, 0, sizeof(ha->fw_options));
7882 ha->fw_options[2] |= BIT_9;
7885 ql_dbg(ql_dbg_init, vha, 0x00e9,
7886 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
7887 __func__, ha->fw_options[1], ha->fw_options[2],
7888 ha->fw_options[3], vha->host->active_mode);
7890 qla2x00_set_fw_options(vha, ha->fw_options);
7894 * qla24xx_get_fcp_prio
7895 * Gets the fcp cmd priority value for the logged in port.
7896 * Looks for a match of the port descriptors within
7897 * each of the fcp prio config entries. If a match is found,
7898 * the tag (priority) value is returned.
7901 * vha = scsi host structure pointer.
7902 * fcport = port structure pointer.
7905 * non-zero (if found)
7912 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
7915 uint8_t pid_match, wwn_match;
7917 uint32_t pid1, pid2;
7918 uint64_t wwn1, wwn2;
7919 struct qla_fcp_prio_entry *pri_entry;
7920 struct qla_hw_data *ha = vha->hw;
7922 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
7926 entries = ha->fcp_prio_cfg->num_entries;
7927 pri_entry = &ha->fcp_prio_cfg->entry[0];
7929 for (i = 0; i < entries; i++) {
7930 pid_match = wwn_match = 0;
7932 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
7937 /* check source pid for a match */
7938 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
7939 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
7940 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
7941 if (pid1 == INVALID_PORT_ID)
7943 else if (pid1 == pid2)
7947 /* check destination pid for a match */
7948 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
7949 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
7950 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
7951 if (pid1 == INVALID_PORT_ID)
7953 else if (pid1 == pid2)
7957 /* check source WWN for a match */
7958 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
7959 wwn1 = wwn_to_u64(vha->port_name);
7960 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
7961 if (wwn2 == (uint64_t)-1)
7963 else if (wwn1 == wwn2)
7967 /* check destination WWN for a match */
7968 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
7969 wwn1 = wwn_to_u64(fcport->port_name);
7970 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
7971 if (wwn2 == (uint64_t)-1)
7973 else if (wwn1 == wwn2)
7977 if (pid_match == 2 || wwn_match == 2) {
7978 /* Found a matching entry */
7979 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
7980 priority = pri_entry->tag;
7991 * qla24xx_update_fcport_fcp_prio
7992 * Activates fcp priority for the logged in fc port
7995 * vha = scsi host structure pointer.
7996 fcport = port structure pointer.
7999 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8005 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8011 if (fcport->port_type != FCT_TARGET ||
8012 fcport->loop_id == FC_NO_LOOP_ID)
8013 return QLA_FUNCTION_FAILED;
8015 priority = qla24xx_get_fcp_prio(vha, fcport);
8017 return QLA_FUNCTION_FAILED;
8019 if (IS_P3P_TYPE(vha->hw)) {
8020 fcport->fcp_prio = priority & 0xf;
8024 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
8025 if (ret == QLA_SUCCESS) {
8026 if (fcport->fcp_prio != priority)
8027 ql_dbg(ql_dbg_user, vha, 0x709e,
8028 "Updated FCP_CMND priority - value=%d loop_id=%d "
8029 "port_id=%02x%02x%02x.\n", priority,
8030 fcport->loop_id, fcport->d_id.b.domain,
8031 fcport->d_id.b.area, fcport->d_id.b.al_pa);
8032 fcport->fcp_prio = priority & 0xf;
8034 ql_dbg(ql_dbg_user, vha, 0x704f,
8035 "Unable to update FCP_CMND priority - ret=0x%x for "
8036 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8037 fcport->d_id.b.domain, fcport->d_id.b.area,
8038 fcport->d_id.b.al_pa);
8043 * qla24xx_update_all_fcp_prio
8044 * Activates fcp priority for all the logged in ports
8047 vha = scsi host structure pointer.
8050 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8056 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8061 ret = QLA_FUNCTION_FAILED;
8062 /* We need to set priority for all logged in ports */
8063 list_for_each_entry(fcport, &vha->vp_fcports, list)
8064 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8069 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
8070 int vp_idx, bool startqp)
8075 struct qla_hw_data *ha = vha->hw;
8076 uint16_t qpair_id = 0;
8077 struct qla_qpair *qpair = NULL;
8078 struct qla_msix_entry *msix;
8080 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
8081 ql_log(ql_log_warn, vha, 0x00181,
8082 "FW/Driver is not multi-queue capable.\n");
8086 if (ql2xmqsupport || ql2xnvmeenable) {
8087 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
8088 if (qpair == NULL) {
8089 ql_log(ql_log_warn, vha, 0x0182,
8090 "Failed to allocate memory for queue pair.\n");
8093 memset(qpair, 0, sizeof(struct qla_qpair));
8095 qpair->hw = vha->hw;
8097 qpair->qp_lock_ptr = &qpair->qp_lock;
8098 spin_lock_init(&qpair->qp_lock);
8099 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
8101 /* Assign available que pair id */
8102 mutex_lock(&ha->mq_lock);
8103 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
8104 if (ha->num_qpairs >= ha->max_qpairs) {
8105 mutex_unlock(&ha->mq_lock);
8106 ql_log(ql_log_warn, vha, 0x0183,
8107 "No resources to create additional q pair.\n");
8111 set_bit(qpair_id, ha->qpair_qid_map);
8112 ha->queue_pair_map[qpair_id] = qpair;
8113 qpair->id = qpair_id;
8114 qpair->vp_idx = vp_idx;
8115 qpair->fw_started = ha->flags.fw_started;
8116 INIT_LIST_HEAD(&qpair->hints_list);
8117 INIT_LIST_HEAD(&qpair->nvme_done_list);
8118 qpair->chip_reset = ha->base_qpair->chip_reset;
8119 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
8120 qpair->enable_explicit_conf =
8121 ha->base_qpair->enable_explicit_conf;
8123 for (i = 0; i < ha->msix_count; i++) {
8124 msix = &ha->msix_entries[i];
8128 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
8129 "Vector %x selected for qpair\n", msix->vector);
8133 ql_log(ql_log_warn, vha, 0x0184,
8134 "Out of MSI-X vectors!.\n");
8138 qpair->msix->in_use = 1;
8139 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
8140 qpair->pdev = ha->pdev;
8141 if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
8142 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
8144 mutex_unlock(&ha->mq_lock);
8146 /* Create response queue first */
8147 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
8149 ql_log(ql_log_warn, vha, 0x0185,
8150 "Failed to create response queue.\n");
8154 qpair->rsp = ha->rsp_q_map[rsp_id];
8156 /* Create request queue */
8157 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
8160 ql_log(ql_log_warn, vha, 0x0186,
8161 "Failed to create request queue.\n");
8165 qpair->req = ha->req_q_map[req_id];
8166 qpair->rsp->req = qpair->req;
8167 qpair->rsp->qpair = qpair;
8168 /* init qpair to this cpu. Will adjust at run time. */
8169 qla_cpu_update(qpair, smp_processor_id());
8171 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
8172 if (ha->fw_attributes & BIT_4)
8173 qpair->difdix_supported = 1;
8176 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
8177 if (!qpair->srb_mempool) {
8178 ql_log(ql_log_warn, vha, 0xd036,
8179 "Failed to create srb mempool for qpair %d\n",
8184 /* Mark as online */
8187 if (!vha->flags.qpairs_available)
8188 vha->flags.qpairs_available = 1;
8190 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
8191 "Request/Response queue pair created, id %d\n",
8193 ql_dbg(ql_dbg_init, vha, 0x0187,
8194 "Request/Response queue pair created, id %d\n",
8201 qla25xx_delete_rsp_que(vha, qpair->rsp);
8203 mutex_lock(&ha->mq_lock);
8204 qpair->msix->in_use = 0;
8205 list_del(&qpair->qp_list_elem);
8206 if (list_empty(&vha->qp_list))
8207 vha->flags.qpairs_available = 0;
8209 ha->queue_pair_map[qpair_id] = NULL;
8210 clear_bit(qpair_id, ha->qpair_qid_map);
8212 mutex_unlock(&ha->mq_lock);
8218 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
8220 int ret = QLA_FUNCTION_FAILED;
8221 struct qla_hw_data *ha = qpair->hw;
8223 qpair->delete_in_progress = 1;
8224 while (atomic_read(&qpair->ref_count))
8227 ret = qla25xx_delete_req_que(vha, qpair->req);
8228 if (ret != QLA_SUCCESS)
8231 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
8232 if (ret != QLA_SUCCESS)
8235 mutex_lock(&ha->mq_lock);
8236 ha->queue_pair_map[qpair->id] = NULL;
8237 clear_bit(qpair->id, ha->qpair_qid_map);
8239 list_del(&qpair->qp_list_elem);
8240 if (list_empty(&vha->qp_list)) {
8241 vha->flags.qpairs_available = 0;
8242 vha->flags.qpairs_req_created = 0;
8243 vha->flags.qpairs_rsp_created = 0;
8245 mempool_destroy(qpair->srb_mempool);
8247 mutex_unlock(&ha->mq_lock);