/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
    sts_entry_t *);

const char *const port_state_str[] = {
    "Unknown",
    "UNCONFIGURED",
    "DEAD",
    "LOST",
    "ONLINE"
};
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint16_t hccr;
    uint16_t mb[8];
    struct rsp_que *rsp;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x505d,
            "%s: NULL response queue pointer.\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        hccr = RD_REG_WORD(&reg->hccr);
        if (qla2x00_check_reg16_for_disconnect(vha, hccr))
            break;
        if (hccr & HCCR_RISC_PAUSE) {
            if (pci_channel_offline(ha->pdev))
                break;

            /*
             * Issue a "HARD" reset in order for the RISC interrupt
             * bit to be cleared.  Schedule a big hammer to get
             * out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
            break;

        if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);

            /* Get mailbox data. */
            mb[0] = RD_MAILBOX_REG(ha, reg, 0);
            if (mb[0] > 0x3fff && mb[0] < 0x8000) {
                qla2x00_mbx_completion(vha, mb[0]);
                status |= MBX_INTERRUPT;
            } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
                mb[1] = RD_MAILBOX_REG(ha, reg, 1);
                mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                mb[3] = RD_MAILBOX_REG(ha, reg, 3);
                qla2x00_async_event(vha, rsp, mb);
            } else {
                ql_dbg(ql_dbg_async, vha, 0x5025,
                    "Unrecognized interrupt type (%d).\n",
                    mb[0]);
            }
            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            RD_REG_WORD(&reg->semaphore);
        } else {
            qla2x00_process_response_queue(rsp);

            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);
        }
    }
    qla2x00_handle_mbx_completion(ha, status);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (IRQ_HANDLED);
}
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
    /* Check for PCI disconnection */
    if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
        if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
            !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
            !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
            /*
             * Schedule this (only once) on the default system
             * workqueue so that all the adapter workqueues and the
             * DPC thread can be shutdown cleanly.
             */
            schedule_work(&vha->hw->board_disable);
        }
        return true;
    } else
        return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
    return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
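
/*
 * Editorial note: ORing in 0xffff0000 widens a 16-bit register value so
 * that the all-ones pattern read from a surprise-removed PCI device
 * (0xffff) becomes 0xffffffff and is caught by the 32-bit disconnect
 * check above; any other 16-bit value cannot match.
 */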
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint16_t hccr;
    uint16_t mb[8];
    struct rsp_que *rsp;
    struct qla_hw_data *ha;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        ql_log(ql_log_info, NULL, 0x5058,
            "%s: NULL response queue pointer.\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
        if (qla2x00_check_reg32_for_disconnect(vha, stat))
            break;
        if (stat & HSR_RISC_PAUSED) {
            if (unlikely(pci_channel_offline(ha->pdev)))
                break;

            hccr = RD_REG_WORD(&reg->hccr);

            if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
                ql_log(ql_log_warn, vha, 0x5026,
                    "Parity error -- HCCR=%x, Dumping "
                    "firmware.\n", hccr);
            else
                ql_log(ql_log_warn, vha, 0x5027,
                    "RISC paused -- HCCR=%x, Dumping "
                    "firmware.\n", hccr);

            /*
             * Issue a "HARD" reset in order for the RISC
             * interrupt bit to be cleared.  Schedule a big
             * hammer to get out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSR_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla2x00_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;

            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_MAILBOX_REG(ha, reg, 1);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            mb[3] = RD_MAILBOX_REG(ha, reg, 3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
            qla2x00_process_response_queue(rsp);
            break;
        case 0x15:
            mb[0] = MBA_CMPLT_1_16BIT;
            mb[1] = MSW(stat);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x16:
            mb[0] = MBA_SCSI_COMPLETION;
            mb[1] = MSW(stat);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            qla2x00_async_event(vha, rsp, mb);
            break;
        default:
            ql_dbg(ql_dbg_async, vha, 0x5028,
                "Unrecognized interrupt type (%d).\n", stat & 0xff);
            break;
        }
        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
        RD_REG_WORD_RELAXED(&reg->hccr);
    }
    qla2x00_handle_mbx_completion(ha, status);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint32_t mboxes;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    /* Read all mbox registers? */
    WARN_ON_ONCE(ha->mbx_count > 32);
    mboxes = (1ULL << ha->mbx_count) - 1;
    if (!ha->mcp)
        ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
    else
        mboxes = ha->mcp->in_mb;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    mboxes >>= 1;
    wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        if (IS_QLA2200(ha) && cnt == 8)
            wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
        if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
            ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
        else if (mboxes & BIT_0)
            ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

        wptr++;
        mboxes >>= 1;
    }
}
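
/*
 * Illustration (assumed values): with ha->mbx_count == 8, the default
 * bitmask computed above is (1ULL << 8) - 1 == 0xff.  Because the mask is
 * shifted right once per loop iteration, BIT_0 always indicates whether
 * the mailbox register currently pointed at by wptr was requested in
 * mcp->in_mb.
 */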
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
    static char *event[] =
        { "Complete", "Request Notification", "Time Extension" };
    int rval;
    struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
    struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
    uint16_t __iomem *wptr;
    uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

    /* Seed data -- mailbox1 -> mailbox7. */
    if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
        wptr = (uint16_t __iomem *)&reg24->mailbox1;
    else if (IS_QLA8044(vha->hw))
        wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
    else
        return;

    for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
        mb[cnt] = RD_REG_WORD(wptr);

    ql_dbg(ql_dbg_async, vha, 0x5021,
        "Inter-Driver Communication %s -- "
        "%04x %04x %04x %04x %04x %04x %04x.\n",
        event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
        mb[4], mb[5], mb[6]);

    switch (aen) {
    /* Handle IDC Error completion case. */
    case MBA_IDC_COMPLETE:
        if (mb[1] >> 15) {
            vha->hw->flags.idc_compl_status = 1;
            if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
                complete(&vha->hw->dcbx_comp);
        }
        break;

    case MBA_IDC_NOTIFY:
        /* Acknowledgement needed? [Notify && non-zero timeout]. */
        timeout = (descr >> 8) & 0xf;
        ql_dbg(ql_dbg_async, vha, 0x5022,
            "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
            vha->host_no, event[aen & 0xff], timeout);

        if (!timeout)
            return;
        rval = qla2x00_post_idc_ack_work(vha, mb);
        if (rval != QLA_SUCCESS)
            ql_log(ql_log_warn, vha, 0x5023,
                "IDC failed to post ACK.\n");
        break;
    case MBA_IDC_TIME_EXT:
        vha->hw->idc_extend_tmo = descr;
        ql_dbg(ql_dbg_async, vha, 0x5087,
            "%lu Inter-Driver Communication %s -- "
            "Extend timeout by=%d.\n",
            vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
        break;
    }
}
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
    static const char *const link_speeds[] = {
        "1", "2", "?", "4", "8", "16", "32", "10"
    };
#define QLA_LAST_SPEED	(ARRAY_SIZE(link_speeds) - 1)

    if (IS_QLA2100(ha) || IS_QLA2200(ha))
        return link_speeds[0];
    else if (speed == 0x13)
        return link_speeds[QLA_LAST_SPEED];
    else if (speed < QLA_LAST_SPEED)
        return link_speeds[speed];
    else
        return link_speeds[LS_UNKNOWN];
}
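
/*
 * Usage sketch (illustrative): the table is indexed by the firmware's
 * link-speed code, so qla2x00_get_link_speed_str(ha, 5) yields "16" and
 * the CNA-specific code 0x13 maps to the last entry, "10" (10 Gbps).
 * Codes at or beyond QLA_LAST_SPEED fall back to link_speeds[LS_UNKNOWN],
 * i.e. "?".
 */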
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
    struct qla_hw_data *ha = vha->hw;

    /*
     * 8200 AEN Interpretation:
     * mb[0] = AEN code
     * mb[1] = AEN Reason code
     * mb[2] = LSW of Peg-Halt Status-1 Register
     * mb[6] = MSW of Peg-Halt Status-1 Register
     * mb[3] = LSW of Peg-Halt Status-2 register
     * mb[7] = MSW of Peg-Halt Status-2 register
     * mb[4] = IDC Device-State Register value
     * mb[5] = IDC Driver-Presence Register value
     */
    ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
        "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
        mb[0], mb[1], mb[2], mb[6]);
    ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
        "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
        "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

    if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
        IDC_HEARTBEAT_FAILURE)) {
        ha->flags.nic_core_hung = 1;
        ql_log(ql_log_warn, vha, 0x5060,
            "83XX: F/W Error Reported: Check if reset required.\n");

        if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
            uint32_t protocol_engine_id, fw_err_code, err_level;

            /*
             * IDC_PEG_HALT_STATUS_CHANGE interpretation:
             * - PEG-Halt Status-1 Register:
             *   (LSW = mb[2], MSW = mb[6])
             *   Bits 0-7   = protocol-engine ID
             *   Bits 8-28  = f/w error code
             *   Bits 29-31 = Error-level
             *     Error-level 0x1 = Non-Fatal error
             *     Error-level 0x2 = Recoverable Fatal error
             *     Error-level 0x4 = UnRecoverable Fatal error
             * - PEG-Halt Status-2 Register:
             *   (LSW = mb[3], MSW = mb[7])
             */
            protocol_engine_id = (mb[2] & 0xff);
            fw_err_code = (((mb[2] & 0xff00) >> 8) |
                ((mb[6] & 0x1fff) << 8));
            err_level = ((mb[6] & 0xe000) >> 13);
            ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
                "Register: protocol_engine_id=0x%x "
                "fw_err_code=0x%x err_level=0x%x.\n",
                protocol_engine_id, fw_err_code, err_level);
            ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
                "Register: 0x%x%x.\n", mb[7], mb[3]);
            if (err_level == ERR_LEVEL_NON_FATAL) {
                ql_log(ql_log_warn, vha, 0x5063,
                    "Not a fatal error, f/w has recovered itself.\n");
            } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
                ql_log(ql_log_fatal, vha, 0x5064,
                    "Recoverable Fatal error: Chip reset "
                    "required.\n");
                qla83xx_schedule_work(vha,
                    QLA83XX_NIC_CORE_RESET);
            } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
                ql_log(ql_log_fatal, vha, 0x5065,
                    "Unrecoverable Fatal error: Set FAILED "
                    "state, reboot required.\n");
                qla83xx_schedule_work(vha,
                    QLA83XX_NIC_CORE_UNRECOVERABLE);
            }
        }

        if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
            uint16_t peg_fw_state, nw_interface_link_up;
            uint16_t nw_interface_signal_detect, sfp_status;
            uint16_t htbt_counter, htbt_monitor_enable;
            uint16_t sfp_additional_info, sfp_multirate;
            uint16_t sfp_tx_fault, link_speed, dcbx_status;

            /*
             * IDC_NIC_FW_REPORTED_FAILURE interpretation:
             * - PEG-to-FC Status Register:
             *   (LSW = mb[2], MSW = mb[6])
             *   Bits 0-7   = Peg-Firmware state
             *   Bit 8      = N/W Interface Link-up
             *   Bit 9      = N/W Interface signal detected
             *   Bits 10-11 = SFP Status
             *     SFP Status 0x0 = SFP+ transceiver not expected
             *     SFP Status 0x1 = SFP+ transceiver not present
             *     SFP Status 0x2 = SFP+ transceiver invalid
             *     SFP Status 0x3 = SFP+ transceiver present and
             *       valid
             *   Bits 12-14 = Heartbeat Counter
             *   Bit 15     = Heartbeat Monitor Enable
             *   Bits 16-17 = SFP Additional Info
             *     SFP info 0x0 = Unrecognized transceiver for
             *       Ethernet
             *     SFP info 0x1 = SFP+ brand validation failed
             *     SFP info 0x2 = SFP+ speed validation failed
             *     SFP info 0x3 = SFP+ access error
             *   Bit 18     = SFP Multirate
             *   Bit 19     = SFP Tx Fault
             *   Bits 20-22 = Link Speed
             *   Bits 23-27 = Reserved
             *   Bits 28-30 = DCBX Status
             *     DCBX Status 0x0 = DCBX Disabled
             *     DCBX Status 0x1 = DCBX Enabled
             *     DCBX Status 0x2 = DCBX Exchange error
             *   Bit 31     = Reserved
             */
            peg_fw_state = (mb[2] & 0x00ff);
            nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
            nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
            sfp_status = ((mb[2] & 0x0c00) >> 10);
            htbt_counter = ((mb[2] & 0x7000) >> 12);
            htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
            sfp_additional_info = (mb[6] & 0x0003);
            sfp_multirate = ((mb[6] & 0x0004) >> 2);
            sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
            link_speed = ((mb[6] & 0x0070) >> 4);
            dcbx_status = ((mb[6] & 0x7000) >> 12);

            ql_log(ql_log_warn, vha, 0x5066,
                "Peg-to-Fc Status Register:\n"
                "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
                "nw_interface_signal_detect=0x%x"
                "\nsfp_status=0x%x.\n ", peg_fw_state,
                nw_interface_link_up, nw_interface_signal_detect,
                sfp_status);
            ql_log(ql_log_warn, vha, 0x5067,
                "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
                "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
                htbt_counter, htbt_monitor_enable,
                sfp_additional_info, sfp_multirate);
            ql_log(ql_log_warn, vha, 0x5068,
                "sfp_tx_fault=0x%x, link_speed=0x%x, "
                "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
                dcbx_status);

            qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
        }

        if (mb[1] & IDC_HEARTBEAT_FAILURE) {
            ql_log(ql_log_warn, vha, 0x5069,
                "Heartbeat Failure encountered, chip reset "
                "required.\n");

            qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
        }
    }

    if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
        ql_log(ql_log_info, vha, 0x506a,
            "IDC Device-State changed = 0x%x.\n", mb[4]);
        if (ha->flags.nic_core_reset_owner)
            return;
        qla83xx_schedule_work(vha, MBA_IDC_AEN);
    }
}
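
/*
 * Worked example for the PegHalt Status-1 decode above (illustrative
 * values, not from a real trace): with mb[2] = 0x1234 and mb[6] = 0x4001,
 * protocol_engine_id = 0x34, fw_err_code = (0x12 | (0x0001 << 8)) = 0x112,
 * and err_level = (0x4000 >> 13) = 0x2, i.e. a recoverable fatal error
 * that schedules QLA83XX_NIC_CORE_RESET.
 */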
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *vp;
    uint32_t vp_did;
    unsigned long flags;
    int ret = 0;

    if (!ha->num_vhosts)
        return ret;

    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry(vp, &ha->vp_list, list) {
        vp_did = vp->d_id.b24;
        if (vp_did == rscn_entry) {
            ret = 1;
            break;
        }
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);

    return ret;
}
fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
    fc_port_t *f, *tf;

    list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
        if (f->loop_id == loop_id)
            return f;
    return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
    fc_port_t *f, *tf;

    list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
        if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
            if (incl_deleted)
                return f;
            else if (f->deleted == 0)
                return f;
        }
    }
    return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
    u8 incl_deleted)
{
    fc_port_t *f, *tf;

    list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
        if (f->d_id.b24 == id->b24) {
            if (incl_deleted)
                return f;
            else if (f->deleted == 0)
                return f;
        }
    }
    return NULL;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
    uint16_t handle_cnt;
    uint16_t cnt, mbx;
    uint32_t handles[5];
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
    struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
    uint32_t rscn_entry, host_pid;
    unsigned long flags;
    fc_port_t *fcport = NULL;

    if (!vha->hw->flags.fw_started)
        return;

    /* Setup to process RIO completion. */
    handle_cnt = 0;
    if (IS_CNA_CAPABLE(ha))
        goto skip_rio;
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handle_cnt = 1;
        break;
    case MBA_CMPLT_1_16BIT:
        handles[0] = mb[1];
        handle_cnt = 1;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_3_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handle_cnt = 3;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_4_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handle_cnt = 4;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_5_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
        handle_cnt = 5;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_32BIT:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handles[1] = le32_to_cpu(
            ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
            RD_MAILBOX_REG(ha, reg, 6));
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    default:
        break;
    }
skip_rio:
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:	/* Fast Post */
        if (!vha->flags.online)
            break;

        for (cnt = 0; cnt < handle_cnt; cnt++)
            qla2x00_process_completed_request(vha, rsp->req,
                handles[cnt]);
        break;

    case MBA_RESET:			/* Reset */
        ql_dbg(ql_dbg_async, vha, 0x5002,
            "Asynchronous RESET.\n");

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
        break;

    case MBA_SYSTEM_ERR:		/* System Error */
        mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
            IS_QLA28XX(ha)) ?
            RD_REG_WORD(&reg24->mailbox7) : 0;
        ql_log(ql_log_warn, vha, 0x5003,
            "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
            "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
        ha->fw_dump_mpi =
            (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
            RD_REG_WORD(&reg24->mailbox7) & BIT_8;
        ha->isp_ops->fw_dump(vha, 1);
        ha->flags.fw_init_done = 0;
        QLA_FW_STOPPED(ha);

        if (IS_FWI2_CAPABLE(ha)) {
            if (mb[1] == 0 && mb[2] == 0) {
                ql_log(ql_log_fatal, vha, 0x5004,
                    "Unrecoverable Hardware Error: adapter "
                    "marked OFFLINE!\n");
                vha->flags.online = 0;
                vha->device_flags |= DFLG_DEV_FAILED;
            } else {
                /* Check to see if MPI timeout occurred */
                if ((mbx & MBX_3) && (ha->port_no == 0))
                    set_bit(MPI_RESET_NEEDED,
                        &vha->dpc_flags);

                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            }
        } else if (mb[1] == 0) {
            ql_log(ql_log_fatal, vha, 0x5005,
                "Unrecoverable Hardware Error: adapter marked "
                "OFFLINE!\n");
            vha->flags.online = 0;
            vha->device_flags |= DFLG_DEV_FAILED;
        } else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
        ql_log(ql_log_warn, vha, 0x5006,
            "ISP Request Transfer Error (%x).\n", mb[1]);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
        ql_log(ql_log_warn, vha, 0x5007,
            "ISP Response Transfer Error (%x).\n", mb[1]);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
        ql_dbg(ql_dbg_async, vha, 0x5008,
            "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
        break;

    case MBA_LOOP_INIT_ERR:
        ql_log(ql_log_warn, vha, 0x5090,
            "LOOP INIT ERROR (%x).\n", mb[1]);
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
        ha->flags.lip_ae = 1;

        ql_dbg(ql_dbg_async, vha, 0x5009,
            "LIP occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
        break;
    case MBA_LOOP_UP:		/* Loop Up Event */
        if (IS_QLA2100(ha) || IS_QLA2200(ha))
            ha->link_data_rate = PORT_SPEED_1GB;
        else
            ha->link_data_rate = mb[1];

        ql_log(ql_log_info, vha, 0x500a,
            "LOOP UP detected (%s Gbps).\n",
            qla2x00_get_link_speed_str(ha, ha->link_data_rate));

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

        if (AUTO_DETECT_SFP_SUPPORT(vha)) {
            set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
        }
        break;

    case MBA_LOOP_DOWN:		/* Loop Down Event */
        SAVE_TOPO(ha);
        ha->flags.lip_ae = 0;
        ha->current_topology = 0;

        mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
            ? RD_REG_WORD(&reg24->mailbox4) : 0;
        mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
            : mbx;
        ql_log(ql_log_info, vha, 0x500b,
            "LOOP DOWN detected (%x %x %x %x).\n",
            mb[1], mb[2], mb[3], mbx);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            /*
             * In case of loop down, restore WWPN from
             * NVRAM in case of FA-WWPN capable ISP
             * Restore for Physical Port only
             */
            if (!vha->vp_idx) {
                if (ha->flags.fawwpn_enabled &&
                    (ha->current_topology == ISP_CFG_F)) {
                    void *wwpn = ha->init_cb->port_name;

                    memcpy(vha->port_name, wwpn, WWN_SIZE);
                    fc_host_port_name(vha->host) =
                        wwn_to_u64(vha->port_name);
                    ql_dbg(ql_dbg_init + ql_dbg_verbose,
                        vha, 0x00d8, "LOOP DOWN detected,"
                        "restore WWPN %016llx\n",
                        wwn_to_u64(vha->port_name));
                }

                clear_bit(VP_CONFIG_OK, &vha->vp_flags);
            }

            vha->device_flags |= DFLG_NO_CABLE;
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        vha->flags.management_server_logged_in = 0;
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
        break;

    case MBA_LIP_RESET:		/* LIP reset occurred */
        ql_dbg(ql_dbg_async, vha, 0x500c,
            "LIP reset occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        ha->operating_mode = LOOP;
        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
        break;
    /* case MBA_DCBX_COMPLETE: */
    case MBA_POINT_TO_POINT:	/* Point-to-Point */
        ha->flags.lip_ae = 0;

        if (IS_QLA2100(ha))
            break;

        if (IS_CNA_CAPABLE(ha)) {
            ql_dbg(ql_dbg_async, vha, 0x500d,
                "DCBX Completed -- %04x %04x %04x.\n",
                mb[1], mb[2], mb[3]);
            if (ha->notify_dcbx_comp && !vha->vp_idx)
                complete(&ha->dcbx_comp);
        } else
            ql_dbg(ql_dbg_async, vha, 0x500e,
                "Asynchronous P2P MODE received.\n");

        /*
         * Until there's a transition from loop down to loop up, treat
         * this as loop down only.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
            set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        vha->flags.management_server_logged_in = 0;
        break;

    case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
        if (IS_QLA2100(ha))
            break;

        ql_dbg(ql_dbg_async, vha, 0x500f,
            "Configuration change detected: value=%x.\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        break;
    case MBA_PORT_UPDATE:		/* Port database update */
        /*
         * Handle only global and vn-port update events
         *
         * Relevant inputs:
         * mb[1] = N_Port handle of changed port
         *         OR 0xffff for global event
         * mb[2] = New login state
         *         7 = Port logged out
         * mb[3] = LSB is vp_idx, 0xff = all vps
         *
         * Skip processing if:
         *       Event is global, vp_idx is NOT all vps,
         *       vp_idx does not match
         *       Event is not global, vp_idx does not match
         */
        if (IS_QLA2XXX_MIDTYPE(ha) &&
            ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
            (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
            break;

        if (mb[2] == 0x7) {
            ql_dbg(ql_dbg_async, vha, 0x5010,
                "Port %s %04x %04x %04x.\n",
                mb[1] == 0xffff ? "unavailable" : "logout",
                mb[1], mb[2], mb[3]);

            if (mb[1] == 0xffff)
                goto global_port_update;

            if (mb[1] == NPH_SNS_LID(ha)) {
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                break;
            }

            /* use handle_cnt for loop id/nport handle */
            if (IS_FWI2_CAPABLE(ha))
                handle_cnt = NPH_SNS;
            else
                handle_cnt = SIMPLE_NAME_SERVER;
            if (mb[1] == handle_cnt) {
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                break;
            }

            /* Port logout */
            fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
            if (!fcport)
                break;
            if (atomic_read(&fcport->state) != FCS_ONLINE)
                break;
            ql_dbg(ql_dbg_async, vha, 0x508a,
                "Marking port lost loopid=%04x portid=%06x.\n",
                fcport->loop_id, fcport->d_id.b24);
            if (qla_ini_mode_enabled(vha)) {
                qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
                fcport->logout_on_delete = 0;
                qlt_schedule_sess_for_deletion(fcport);
            }
            break;

global_port_update:
            if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
                vha->device_flags |= DFLG_NO_CABLE;
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            if (vha->vp_idx) {
                atomic_set(&vha->vp_state, VP_FAILED);
                fc_vport_set_state(vha->fc_vport,
                    FC_VPORT_FAILED);
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            vha->flags.management_server_logged_in = 0;
            ha->link_data_rate = PORT_SPEED_UNKNOWN;
            break;
        }

        /*
         * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
         * event etc. earlier indicating loop is down) then process
         * it.  Otherwise ignore it and Wait for RSCN to come in.
         */
        atomic_set(&vha->loop_down_timer, 0);
        if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
            !ha->flags.n2n_ae &&
            atomic_read(&vha->loop_state) != LOOP_DEAD) {
            ql_dbg(ql_dbg_async, vha, 0x5011,
                "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                mb[1], mb[2], mb[3]);
            break;
        }

        ql_dbg(ql_dbg_async, vha, 0x5012,
            "Port database changed %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        /*
         * Mark all devices as missing so we will login again.
         */
        atomic_set(&vha->loop_state, LOOP_UP);
        vha->scan.scan_retry = 0;

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        set_bit(VP_CONFIG_OK, &vha->vp_flags);
        break;
    case MBA_RSCN_UPDATE:		/* State Change Registration */
        /* Check if the Vport has issued a SCR */
        if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
            break;
        /* Only handle SCNs for our Vport index. */
        if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
            break;

        ql_dbg(ql_dbg_async, vha, 0x5013,
            "RSCN database changed -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
        host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
            | vha->d_id.b.al_pa;
        if (rscn_entry == host_pid) {
            ql_dbg(ql_dbg_async, vha, 0x5014,
                "Ignoring RSCN update to local host "
                "port ID (%06x).\n", host_pid);
            break;
        }

        /* Ignore reserved bits from RSCN-payload. */
        rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

        /* Skip RSCNs for virtual ports on the same physical port */
        if (qla2x00_is_a_vp_did(vha, rscn_entry))
            break;

        atomic_set(&vha->loop_down_timer, 0);
        vha->flags.management_server_logged_in = 0;
        {
            struct event_arg ea;

            memset(&ea, 0, sizeof(ea));
            ea.id.b24 = rscn_entry;
            ea.id.b.rsvd_1 = rscn_entry >> 24;
            qla2x00_handle_rscn(vha, &ea);
            qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
        }
        break;
    /* case MBA_RIO_RESPONSE: */
    case MBA_ZIO_RESPONSE:
        ql_dbg(ql_dbg_async, vha, 0x5015,
            "[R|Z]IO update completion.\n");

        if (IS_FWI2_CAPABLE(ha))
            qla24xx_process_response_queue(vha, rsp);
        else
            qla2x00_process_response_queue(rsp);
        break;

    case MBA_DISCARD_RND_FRAME:
        ql_dbg(ql_dbg_async, vha, 0x5016,
            "Discard RND Frame -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;

    case MBA_TRACE_NOTIFICATION:
        ql_dbg(ql_dbg_async, vha, 0x5017,
            "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
        break;

    case MBA_ISP84XX_ALERT:
        ql_dbg(ql_dbg_async, vha, 0x5018,
            "ISP84XX Alert Notification -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);

        spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
        switch (mb[1]) {
        case A84_PANIC_RECOVERY:
            ql_log(ql_log_info, vha, 0x5019,
                "Alert 84XX: panic recovery %04x %04x.\n",
                mb[2], mb[3]);
            break;
        case A84_OP_LOGIN_COMPLETE:
            ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
            ql_log(ql_log_info, vha, 0x501a,
                "Alert 84XX: firmware version %x.\n",
                ha->cs84xx->op_fw_version);
            break;
        case A84_DIAG_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            ql_log(ql_log_info, vha, 0x501b,
                "Alert 84XX: diagnostic firmware version %x.\n",
                ha->cs84xx->diag_fw_version);
            break;
        case A84_GOLD_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            ha->cs84xx->fw_update = 1;
            ql_log(ql_log_info, vha, 0x501c,
                "Alert 84XX: gold firmware version %x.\n",
                ha->cs84xx->gold_fw_version);
            break;
        default:
            ql_log(ql_log_warn, vha, 0x501d,
                "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
                mb[1], mb[2], mb[3]);
        }
        spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
        break;
    case MBA_DCBX_START:
        ql_dbg(ql_dbg_async, vha, 0x501e,
            "DCBX Started -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;
    case MBA_DCBX_PARAM_UPDATE:
        ql_dbg(ql_dbg_async, vha, 0x501f,
            "DCBX Parameters Updated -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;
    case MBA_FCF_CONF_ERR:
        ql_dbg(ql_dbg_async, vha, 0x5020,
            "FCF Configuration Error -- %04x %04x %04x.\n",
            mb[1], mb[2], mb[3]);
        break;
    case MBA_IDC_NOTIFY:
        if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
            mb[4] = RD_REG_WORD(&reg24->mailbox4);
            if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
                (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
                (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
                set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
                /*
                 * Extend loop down timer since port is active.
                 */
                if (atomic_read(&vha->loop_state) == LOOP_DOWN)
                    atomic_set(&vha->loop_down_timer,
                        LOOP_DOWN_TIME);
                qla2xxx_wake_dpc(vha);
            }
        }
        /* fall through */
    case MBA_IDC_COMPLETE:
        if (ha->notify_lb_portup_comp && !vha->vp_idx)
            complete(&ha->lb_portup_comp);
        /* Fallthru */
    case MBA_IDC_TIME_EXT:
        if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
            IS_QLA8044(ha))
            qla81xx_idc_event(vha, mb[0], mb[1]);
        break;

    case MBA_IDC_AEN:
        if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
            ha->flags.fw_init_done = 0;
            ql_log(ql_log_warn, vha, 0xffff,
                "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
                mb[0], mb[1], mb[2], mb[3]);

            if ((mb[1] & BIT_8) ||
                (mb[2] & BIT_8)) {
                ql_log(ql_log_warn, vha, 0xd013,
                    "MPI Heartbeat stop. FW dump needed\n");
                ha->fw_dump_mpi = 1;
                ha->isp_ops->fw_dump(vha, 1);
            }
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
        } else if (IS_QLA83XX(ha)) {
            mb[4] = RD_REG_WORD(&reg24->mailbox4);
            mb[5] = RD_REG_WORD(&reg24->mailbox5);
            mb[6] = RD_REG_WORD(&reg24->mailbox6);
            mb[7] = RD_REG_WORD(&reg24->mailbox7);
            qla83xx_handle_8200_aen(vha, mb);
        } else {
            ql_dbg(ql_dbg_async, vha, 0x5052,
                "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
                mb[0], mb[1], mb[2], mb[3]);
        }
        break;

    case MBA_DPORT_DIAGNOSTICS:
        ql_dbg(ql_dbg_async, vha, 0x5052,
            "D-Port Diagnostics: %04x result=%s\n",
            mb[0],
            mb[1] == 0 ? "start" :
            mb[1] == 1 ? "done (pass)" :
            mb[1] == 2 ? "done (error)" : "other");
        break;

    case MBA_TEMPERATURE_ALERT:
        ql_dbg(ql_dbg_async, vha, 0x505e,
            "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
        if (mb[1] == 0x12)
            schedule_work(&ha->board_disable);
        break;

    case MBA_TRANS_INSERT:
        ql_dbg(ql_dbg_async, vha, 0x5091,
            "Transceiver Insertion: %04x\n", mb[1]);
        break;

    default:
        ql_dbg(ql_dbg_async, vha, 0x5057,
            "Unknown AEN:%04x %04x %04x %04x\n",
            mb[0], mb[1], mb[2], mb[3]);
    }

    qlt_async_event(mb[0], vha, mb);

    if (!vha->vp_idx && ha->num_vhosts)
        qla2x00_alert_all_vps(rsp, mb);
}
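
/*
 * Worked example for the RSCN decode in the handler above (illustrative
 * values): with mb[1] = 0x00aa and mb[2] = 0xbbcc, rscn_entry =
 * ((mb[1] & 0x3ff) << 16) | mb[2] = 0x00aabbcc, i.e. domain 0xaa,
 * area 0xbb, al_pa 0xcc.  The 0x3ff mask keeps the two qualifier bits
 * above the domain byte while dropping the remaining reserved bits of
 * the RSCN payload.
 */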
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;

    /* Validate handle. */
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x3014,
            "Invalid SCSI command index (%x).\n", index);

        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    sp = req->outstanding_cmds[index];
    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[index] = NULL;

        /* Save ISP completion status */
        sp->done(sp, DID_OK << 16);
    } else {
        ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    }
}
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    sts_entry_t *pkt = iocb;
    srb_t *sp = NULL;
    uint16_t index;

    index = LSW(pkt->handle);
    if (index >= req->num_outstanding_cmds) {
        ql_log(ql_log_warn, vha, 0x5031,
            "Invalid command index (%x) type %8ph.\n",
            index, iocb);
        if (IS_P3P_TYPE(ha))
            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
        else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        goto done;
    }

    sp = req->outstanding_cmds[index];
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x5032,
            "Invalid completion handle (%x) -- timed-out.\n", index);
        return sp;
    }
    if (sp->handle != index) {
        ql_log(ql_log_warn, vha, 0x5033,
            "SRB handle (%x) mismatch %x.\n", sp->handle, index);
        return NULL;
    }

    req->outstanding_cmds[index] = NULL;

done:
    return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
    const char func[] = "MBX-IOCB";
    const char *type;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_iocb *lio;
    uint16_t *data;
    uint16_t status;

    sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
    if (!sp)
        return;

    lio = &sp->u.iocb_cmd;
    type = sp->name;
    fcport = sp->fcport;
    data = lio->u.logio.data;

    data[0] = MBS_COMMAND_ERROR;
    data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
        QLA_LOGIO_LOGIN_RETRIED : 0;
    if (mbx->entry_status) {
        ql_dbg(ql_dbg_async, vha, 0x5043,
            "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
            "entry-status=%x status=%x state-flag=%x "
            "status-flags=%x.\n", type, sp->handle,
            fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa, mbx->entry_status,
            le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
            le16_to_cpu(mbx->status_flags));

        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
            mbx, sizeof(*mbx));

        goto logio_done;
    }

    status = le16_to_cpu(mbx->status);
    if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
        le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
        status = 0;
    if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
        ql_dbg(ql_dbg_async, vha, 0x5045,
            "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
            type, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le16_to_cpu(mbx->mb1));

        data[0] = MBS_COMMAND_COMPLETE;
        if (sp->type == SRB_LOGIN_CMD) {
            fcport->port_type = FCT_TARGET;
            if (le16_to_cpu(mbx->mb1) & BIT_0)
                fcport->port_type = FCT_INITIATOR;
            else if (le16_to_cpu(mbx->mb1) & BIT_1)
                fcport->flags |= FCF_FCP2_DEVICE;
        }
        goto logio_done;
    }

    data[0] = le16_to_cpu(mbx->mb0);
    switch (data[0]) {
    case MBS_PORT_ID_USED:
        data[1] = le16_to_cpu(mbx->mb1);
        break;
    case MBS_LOOP_ID_USED:
        break;
    default:
        data[0] = MBS_COMMAND_ERROR;
        break;
    }

    ql_log(ql_log_warn, vha, 0x5046,
        "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
        "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
        status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
        le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
        le16_to_cpu(mbx->mb7));

logio_done:
    sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
    const char func[] = "MBX-IOCB2";
    srb_t *sp;
    struct srb_iocb *si;
    u16 sz, i;
    int res;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;

    si = &sp->u.iocb_cmd;
    sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

    for (i = 0; i < sz; i++)
        si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

    res = (si->u.mbx.in_mb[0] & MBS_MASK);

    sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
    const char func[] = "nack";
    srb_t *sp;
    int res = 0;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;

    if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
        res = QLA_FUNCTION_FAILED;

    sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
    const char func[] = "CT_IOCB";
    const char *type;
    srb_t *sp;
    struct bsg_job *bsg_job;
    struct fc_bsg_reply *bsg_reply;
    uint16_t comp_status;
    int res = 0;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;

    switch (sp->type) {
    case SRB_CT_CMD:
        bsg_job = sp->u.bsg_job;
        bsg_reply = bsg_job->reply;

        type = "ct pass-through";

        comp_status = le16_to_cpu(pkt->comp_status);

        /*
         * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
         * fc payload to the caller
         */
        bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);

        if (comp_status != CS_COMPLETE) {
            if (comp_status == CS_DATA_UNDERRUN) {
                res = DID_OK << 16;
                bsg_reply->reply_payload_rcv_len =
                    le16_to_cpu(pkt->rsp_info_len);

                ql_log(ql_log_warn, vha, 0x5048,
                    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
                    type, comp_status,
                    bsg_reply->reply_payload_rcv_len);
            } else {
                ql_log(ql_log_warn, vha, 0x5049,
                    "CT pass-through-%s error comp_status=0x%x.\n",
                    type, comp_status);
                res = DID_ERROR << 16;
                bsg_reply->reply_payload_rcv_len = 0;
            }
            ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
                pkt, sizeof(*pkt));
        } else {
            res = DID_OK << 16;
            bsg_reply->reply_payload_rcv_len =
                bsg_job->reply_payload.payload_len;
            bsg_job->reply_len = 0;
        }
        break;
    case SRB_CT_PTHRU_CMD:
        /*
         * borrowing sts_entry_24xx.comp_status.
         * same location as ct_entry_24xx.comp_status
         */
        res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
            (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
            sp->name);
        break;
    }

    sp->done(sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
    const char func[] = "ELS_CT_IOCB";
    const char *type;
    srb_t *sp;
    struct bsg_job *bsg_job;
    struct fc_bsg_reply *bsg_reply;
    uint16_t comp_status;
    uint32_t fw_status[3];
    int res;
    struct srb_iocb *els;

    sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
    if (!sp)
        return;

    type = NULL;
    switch (sp->type) {
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
        type = "els";
        break;
    case SRB_CT_CMD:
        type = "ct pass-through";
        break;
    case SRB_ELS_DCMD:
        type = "Driver ELS logo";
        if (iocb_type != ELS_IOCB_TYPE) {
            ql_dbg(ql_dbg_user, vha, 0x5047,
                "Completing %s: (%p) type=%d.\n",
                type, sp, sp->type);
            sp->done(sp, 0);
            return;
        }
        break;
    case SRB_CT_PTHRU_CMD:
        /* borrowing sts_entry_24xx.comp_status.
           same location as ct_entry_24xx.comp_status
         */
        res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
            (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
            sp->name);
        sp->done(sp, res);
        return;
    default:
        ql_dbg(ql_dbg_user, vha, 0x503e,
            "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
        return;
    }

    comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
    fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
    fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

    if (iocb_type == ELS_IOCB_TYPE) {
        els = &sp->u.iocb_cmd;
        els->u.els_plogi.fw_status[0] = fw_status[0];
        els->u.els_plogi.fw_status[1] = fw_status[1];
        els->u.els_plogi.fw_status[2] = fw_status[2];
        els->u.els_plogi.comp_status = fw_status[0];
        if (comp_status == CS_COMPLETE) {
            res = DID_OK << 16;
        } else {
            if (comp_status == CS_DATA_UNDERRUN) {
                res = DID_OK << 16;
                els->u.els_plogi.len =
                    le16_to_cpu(((struct els_sts_entry_24xx *)
                    pkt)->total_byte_count);
            } else {
                els->u.els_plogi.len = 0;
                res = DID_ERROR << 16;
            }
        }
        ql_dbg(ql_dbg_user, vha, 0x503f,
            "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
            type, sp->handle, comp_status, fw_status[1], fw_status[2],
            le16_to_cpu(((struct els_sts_entry_24xx *)
            pkt)->total_byte_count));
        goto els_ct_done;
    }

    /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
     * fc payload to the caller
     */
    bsg_job = sp->u.bsg_job;
    bsg_reply = bsg_job->reply;
    bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

    if (comp_status != CS_COMPLETE) {
        if (comp_status == CS_DATA_UNDERRUN) {
            res = DID_OK << 16;
            bsg_reply->reply_payload_rcv_len =
                le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

            ql_dbg(ql_dbg_user, vha, 0x503f,
                "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
                "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
                type, sp->handle, comp_status, fw_status[1], fw_status[2],
                le16_to_cpu(((struct els_sts_entry_24xx *)
                pkt)->total_byte_count));
        } else {
            ql_dbg(ql_dbg_user, vha, 0x5040,
                "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
                "error subcode 1=0x%x error subcode 2=0x%x.\n",
                type, sp->handle, comp_status,
                le16_to_cpu(((struct els_sts_entry_24xx *)
                pkt)->error_subcode_1),
                le16_to_cpu(((struct els_sts_entry_24xx *)
                pkt)->error_subcode_2));
            res = DID_ERROR << 16;
            bsg_reply->reply_payload_rcv_len = 0;
        }
        memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
            fw_status, sizeof(fw_status));
        ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
            pkt, sizeof(*pkt));
    } else {
        res = DID_OK << 16;
        bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
        bsg_job->reply_len = 0;
    }
els_ct_done:
    sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
    const char func[] = "LOGIO-IOCB";
    const char *type;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_iocb *lio;
    uint16_t *data;
    uint32_t iop[2];

    sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
    if (!sp)
        return;

    lio = &sp->u.iocb_cmd;
    type = sp->name;
    fcport = sp->fcport;
    data = lio->u.logio.data;

    data[0] = MBS_COMMAND_ERROR;
    data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
        QLA_LOGIO_LOGIN_RETRIED : 0;
    if (logio->entry_status) {
        ql_log(ql_log_warn, fcport->vha, 0x5034,
            "Async-%s error entry - %8phC hdl=%x"
            "portid=%02x%02x%02x entry-status=%x.\n",
            type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            logio->entry_status);
        ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
            logio, sizeof(*logio));

        goto logio_done;
    }

    if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
        ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
            "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
            "iop0=%x.\n", type, fcport->port_name, sp->handle,
            fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le32_to_cpu(logio->io_parameter[0]));

        vha->hw->exch_starvation = 0;
        data[0] = MBS_COMMAND_COMPLETE;

        if (sp->type == SRB_PRLI_CMD) {
            lio->u.logio.iop[0] =
                le32_to_cpu(logio->io_parameter[0]);
            lio->u.logio.iop[1] =
                le32_to_cpu(logio->io_parameter[1]);
            goto logio_done;
        }

        if (sp->type != SRB_LOGIN_CMD)
            goto logio_done;

        iop[0] = le32_to_cpu(logio->io_parameter[0]);
        if (iop[0] & BIT_4) {
            fcport->port_type = FCT_TARGET;
            if (iop[0] & BIT_8)
                fcport->flags |= FCF_FCP2_DEVICE;
        } else if (iop[0] & BIT_5)
            fcport->port_type = FCT_INITIATOR;

        if (iop[0] & BIT_7)
            fcport->flags |= FCF_CONF_COMP_SUPPORTED;

        if (logio->io_parameter[7] || logio->io_parameter[8])
            fcport->supported_classes |= FC_COS_CLASS2;
        if (logio->io_parameter[9] || logio->io_parameter[10])
            fcport->supported_classes |= FC_COS_CLASS3;

        goto logio_done;
    }

    iop[0] = le32_to_cpu(logio->io_parameter[0]);
    iop[1] = le32_to_cpu(logio->io_parameter[1]);
    lio->u.logio.iop[0] = iop[0];
    lio->u.logio.iop[1] = iop[1];
    switch (iop[0]) {
    case LSC_SCODE_PORTID_USED:
        data[0] = MBS_PORT_ID_USED;
        data[1] = LSW(iop[1]);
        break;
    case LSC_SCODE_NPORT_USED:
        data[0] = MBS_LOOP_ID_USED;
        break;
    case LSC_SCODE_CMD_FAILED:
        if (iop[1] == 0x0606) {
            /*
             * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
             * Target side acked.
             */
            data[0] = MBS_COMMAND_COMPLETE;
            goto logio_done;
        }
        data[0] = MBS_COMMAND_ERROR;
        break;
    case LSC_SCODE_NOXCB:
        vha->hw->exch_starvation++;
        if (vha->hw->exch_starvation > 5) {
            ql_log(ql_log_warn, vha, 0xd046,
                "Exchange starvation. Resetting RISC\n");

            vha->hw->exch_starvation = 0;

            if (IS_P3P_TYPE(vha->hw))
                set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
            else
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
        }
        /* fall through */
    default:
        data[0] = MBS_COMMAND_ERROR;
        break;
    }

    ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
        "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
        "iop0=%x iop1=%x.\n", type, fcport->port_name,
        sp->handle, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa,
        le16_to_cpu(logio->comp_status),
        le32_to_cpu(logio->io_parameter[0]),
        le32_to_cpu(logio->io_parameter[1]));

logio_done:
    sp->done(sp, 0);
}

static void
1835 const char func[] = "TMF-IOCB";
1839 struct srb_iocb *iocb;
1840 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1842 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1846 iocb = &sp->u.iocb_cmd;
1848 fcport = sp->fcport;
1849 iocb->u.tmf.data = QLA_SUCCESS;
1851 if (sts->entry_status) {
1852 ql_log(ql_log_warn, fcport->vha, 0x5038,
1853 "Async-%s error - hdl=%x entry-status(%x).\n",
1854 type, sp->handle, sts->entry_status);
1855 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1856 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
1857 ql_log(ql_log_warn, fcport->vha, 0x5039,
1858 "Async-%s error - hdl=%x completion status(%x).\n",
1859 type, sp->handle, sts->comp_status);
1860 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1861 } else if ((le16_to_cpu(sts->scsi_status) &
1862 SS_RESPONSE_INFO_LEN_VALID)) {
1863 if (le32_to_cpu(sts->rsp_data_len) < 4) {
1864 ql_log(ql_log_warn, fcport->vha, 0x503b,
1865 "Async-%s error - hdl=%x not enough response(%d).\n",
1866 type, sp->handle, sts->rsp_data_len);
1867 } else if (sts->data[3]) {
1868 ql_log(ql_log_warn, fcport->vha, 0x503c,
1869 "Async-%s error - hdl=%x response(%x).\n",
1870 type, sp->handle, sts->data[3]);
1871 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1875 if (iocb->u.tmf.data != QLA_SUCCESS)
1876 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    void *tsk, srb_t *sp)
{
    fc_port_t *fcport;
    struct srb_iocb *iocb;
    struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
    uint16_t state_flags;
    struct nvmefc_fcp_req *fd;
    uint16_t ret = QLA_SUCCESS;
    uint16_t comp_status = le16_to_cpu(sts->comp_status);

    iocb = &sp->u.iocb_cmd;
    fcport = sp->fcport;
    iocb->u.nvme.comp_status = comp_status;
    state_flags = le16_to_cpu(sts->state_flags);
    fd = iocb->u.nvme.desc;

    if (unlikely(iocb->u.nvme.aen_op))
        atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);

    /*
     * State flags: Bit 6 and 0.
     * If 0 is set, we don't care about 6.
     * both cases resp was dma'd to host buffer
     * if both are 0, that is good path case.
     * if six is set and 0 is clear, we need to
     * copy resp data from status iocb to resp buffer.
     */
    if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
        iocb->u.nvme.rsp_pyld_len = 0;
    } else if ((state_flags & SF_FCP_RSP_DMA)) {
        iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
    } else if (state_flags & SF_NVME_ERSP) {
        uint32_t *inbuf, *outbuf;
        uint16_t iter;

        inbuf = (uint32_t *)&sts->nvme_ersp_data;
        outbuf = (uint32_t *)fd->rspaddr;
        iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
        iter = iocb->u.nvme.rsp_pyld_len >> 2;
        for (; iter; iter--)
            *outbuf++ = swab32(*inbuf++);
    } else { /* unhandled case */
        ql_log(ql_log_warn, fcport->vha, 0x503a,
            "NVME-%s error. Unhandled state_flags of %x\n",
            sp->name, state_flags);
    }

    fd->transferred_length = fd->payload_length -
        le32_to_cpu(sts->residual_len);

    if (unlikely(comp_status != CS_COMPLETE))
        ql_log(ql_log_warn, fcport->vha, 0x5060,
            "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
            sp->name, sp->handle, comp_status,
            fd->transferred_length, le32_to_cpu(sts->residual_len),
            sts->ox_id);

    /*
     * If transport error then Failure (HBA rejects request)
     * otherwise transport will handle.
     */
    switch (comp_status) {
    case CS_COMPLETE:
        break;

    case CS_RESET:
    case CS_PORT_UNAVAILABLE:
    case CS_PORT_LOGGED_OUT:
        fcport->nvme_flag |= NVME_FLAG_RESETTING;
        /* fall through */
    case CS_ABORTED:
    case CS_PORT_BUSY:
        fd->transferred_length = 0;
        iocb->u.nvme.rsp_pyld_len = 0;
        ret = QLA_ABORTED;
        break;
    case CS_DATA_UNDERRUN:
        break;
    default:
        ret = QLA_FUNCTION_FAILED;
        break;
    }
    sp->done(sp, ret);
}
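
/*
 * Editorial note (assumption based on the copy loop above): in the
 * SF_NVME_ERSP path the firmware appears to deliver the ERSP payload as
 * big-endian 32-bit words inside the status IOCB, which is why each word
 * is swab32()'d while being copied to fd->rspaddr.
 */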
static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
    struct vp_ctrl_entry_24xx *vce)
{
    const char func[] = "CTRLVP-IOCB";
    srb_t *sp;
    int rval = QLA_SUCCESS;

    sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
    if (!sp)
        return;

    if (vce->entry_status != 0) {
        ql_dbg(ql_dbg_vport, vha, 0x10c4,
            "%s: Failed to complete IOCB -- error status (%x)\n",
            sp->name, vce->entry_status);
        rval = QLA_FUNCTION_FAILED;
    } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
        ql_dbg(ql_dbg_vport, vha, 0x10c5,
            "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
            sp->name, le16_to_cpu(vce->comp_status),
            le16_to_cpu(vce->vp_idx_failed));
        rval = QLA_FUNCTION_FAILED;
    } else
        ql_dbg(ql_dbg_vport, vha, 0x10c6,
            "Done %s.\n", __func__);

    sp->rc = rval;
    sp->done(sp, rval);
}
/* Process a single response queue entry. */
static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
    struct rsp_que *rsp,
    sts_entry_t *pkt)
{
    sts21_entry_t *sts21_entry;
    sts22_entry_t *sts22_entry;
    uint16_t handle_cnt;
    uint16_t cnt;

    switch (pkt->entry_type) {
    case STATUS_TYPE:
        qla2x00_status_entry(vha, rsp, pkt);
        break;
    case STATUS_TYPE_21:
        sts21_entry = (sts21_entry_t *)pkt;
        handle_cnt = sts21_entry->handle_count;
        for (cnt = 0; cnt < handle_cnt; cnt++)
            qla2x00_process_completed_request(vha, rsp->req,
                sts21_entry->handle[cnt]);
        break;
    case STATUS_TYPE_22:
        sts22_entry = (sts22_entry_t *)pkt;
        handle_cnt = sts22_entry->handle_count;
        for (cnt = 0; cnt < handle_cnt; cnt++)
            qla2x00_process_completed_request(vha, rsp->req,
                sts22_entry->handle[cnt]);
        break;
    case STATUS_CONT_TYPE:
        qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
        break;
    case MBX_IOCB_TYPE:
        qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
        break;
    case CT_IOCB_TYPE:
        qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
        break;
    default:
        /* Type Not Supported. */
        ql_log(ql_log_warn, vha, 0x504a,
            "Received unknown response pkt type %x entry status=%x.\n",
            pkt->entry_type, pkt->entry_status);
        break;
    }
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha = rsp->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    sts_entry_t *pkt;

    vha = pci_get_drvdata(ha->pdev);

    if (!vha->flags.online)
        return;

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
        pkt = (sts_entry_t *)rsp->ring_ptr;

        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
            rsp->ring_index = 0;
            rsp->ring_ptr = rsp->ring;
        } else {
            rsp->ring_ptr++;
        }

        if (pkt->entry_status != 0) {
            qla2x00_error_entry(vha, rsp, pkt);
            ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
            wmb();
            continue;
        }

        qla2x00_process_response_entry(vha, rsp, pkt);
        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
        wmb();
    }

    /* Adjust ring index */
    WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
    struct scsi_qla_host *vha = sp->vha;
    struct scsi_cmnd *cp = GET_CMD_SP(sp);
    uint32_t track_sense_len;

    if (sense_len >= SCSI_SENSE_BUFFERSIZE)
        sense_len = SCSI_SENSE_BUFFERSIZE;

    SET_CMD_SENSE_LEN(sp, sense_len);
    SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
    track_sense_len = sense_len;

    if (sense_len > par_sense_len)
        sense_len = par_sense_len;

    memcpy(cp->sense_buffer, sense_data, sense_len);

    SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
    track_sense_len -= sense_len;
    SET_CMD_SENSE_LEN(sp, track_sense_len);

    if (track_sense_len != 0) {
        rsp->status_srb = sp;
        cp->result = res;
    }

    if (sense_len) {
        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
            "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
            sp->vha->host_no, cp->device->id, cp->device->lun,
            cp);
        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
            cp->sense_buffer, sense_len);
    }
}
struct scsi_dif_tuple {
    __be16 guard;	/* Checksum */
    __be16 app_tag;	/* APPL identifier */
    __be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
    struct scsi_qla_host *vha = sp->vha;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    uint8_t *ap = &sts24->data[12];
    uint8_t *ep = &sts24->data[20];
    uint32_t e_ref_tag, a_ref_tag;
    uint16_t e_app_tag, a_app_tag;
    uint16_t e_guard, a_guard;

    /*
     * swab32 of the "data" field in the beginning of qla2x00_status_entry()
     * would make guard field appear at offset 2
     */
    a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
    a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
    a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
    e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
    e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
    e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

    ql_dbg(ql_dbg_io, vha, 0x3023,
        "iocb(s) %p Returned STATUS.\n", sts24);

    ql_dbg(ql_dbg_io, vha, 0x3024,
        "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
        " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
        " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
        cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
        a_app_tag, e_app_tag, a_guard, e_guard);

    /*
     * Ignore sector if:
     * For type     3: ref & app tag is all 'f's
     * For type 0,1,2: app tag is all 'f's
     */
    if ((a_app_tag == T10_PI_APP_ESCAPE) &&
        ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
        (a_ref_tag == T10_PI_REF_ESCAPE))) {
        uint32_t blocks_done, resid;
        sector_t lba_s = scsi_get_lba(cmd);

        /* 2TB boundary case covered automatically with this */
        blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

        resid = scsi_bufflen(cmd) - (blocks_done *
            cmd->device->sector_size);

        scsi_set_resid(cmd, resid);
        cmd->result = DID_OK << 16;

        /* Update protection tag */
        if (scsi_prot_sg_count(cmd)) {
            uint32_t i, j = 0, k = 0, num_ent;
            struct scatterlist *sg;
            struct t10_pi_tuple *spt;

            /* Patch the corresponding protection tags */
            scsi_for_each_prot_sg(cmd, sg,
                scsi_prot_sg_count(cmd), i) {
                num_ent = sg_dma_len(sg) / 8;
                if (k + num_ent < blocks_done) {
                    k += num_ent;
                    continue;
                }
                j = blocks_done - k - 1;
                k = blocks_done;
                break;
            }

            if (k != blocks_done) {
                ql_log(ql_log_warn, vha, 0x302f,
                    "unexpected tag values tag:lba=%x:%llx)\n",
                    e_ref_tag, (unsigned long long)lba_s);
                return 1;
            }

            spt = page_address(sg_page(sg)) + sg->offset;
            spt += j;

            spt->app_tag = T10_PI_APP_ESCAPE;
            if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
                spt->ref_tag = T10_PI_REF_ESCAPE;
        }

        return 0;
    }

    /* check guard */
    if (e_guard != a_guard) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x1);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION;
        return 1;
    }

    /* check ref tag */
    if (e_ref_tag != a_ref_tag) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x3);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION;
        return 1;
    }

    /* check appl tag */
    if (e_app_tag != a_app_tag) {
        scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
            0x10, 0x2);
        set_driver_byte(cmd, DRIVER_SENSE);
        set_host_byte(cmd, DID_ABORT);
        cmd->result |= SAM_STAT_CHECK_CONDITION;
        return 1;
    }

    return 1;
}
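
/*
 * Worked example for the escape-tag path above (illustrative values): a
 * read of 8 blocks at lba_s = 0x1000 with 512-byte sectors that the
 * device terminated after 5 blocks reports e_ref_tag = 0x1004, giving
 * blocks_done = 0x1004 - 0x1000 + 1 = 5 and resid = scsi_bufflen(cmd) -
 * 5 * 512; the command then completes DID_OK with that residual instead
 * of being failed.
 */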
2263 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2264 struct req_que *req, uint32_t index)
2266 struct qla_hw_data *ha = vha->hw;
2268 uint16_t comp_status;
2269 uint16_t scsi_status;
2271 uint32_t rval = EXT_STATUS_OK;
2272 struct bsg_job *bsg_job = NULL;
2273 struct fc_bsg_request *bsg_request;
2274 struct fc_bsg_reply *bsg_reply;
2275 sts_entry_t *sts = pkt;
2276 struct sts_entry_24xx *sts24 = pkt;
2278 /* Validate handle. */
2279 if (index >= req->num_outstanding_cmds) {
2280 ql_log(ql_log_warn, vha, 0x70af,
2281 "Invalid SCSI completion handle 0x%x.\n", index);
2282 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2286 sp = req->outstanding_cmds[index];
2288 ql_log(ql_log_warn, vha, 0x70b0,
2289 "Req:%d: Invalid ISP SCSI completion handle (0x%x)\n",
2292 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2296 /* Free outstanding command slot. */
2297 req->outstanding_cmds[index] = NULL;
2298 bsg_job = sp->u.bsg_job;
2299 bsg_request = bsg_job->request;
2300 bsg_reply = bsg_job->reply;
2302 if (IS_FWI2_CAPABLE(ha)) {
2303 comp_status = le16_to_cpu(sts24->comp_status);
2304 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2306 comp_status = le16_to_cpu(sts->comp_status);
2307 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2310 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2311 switch (comp_status) {
2313 if (scsi_status == 0) {
2314 bsg_reply->reply_payload_rcv_len =
2315 bsg_job->reply_payload.payload_len;
2316 vha->qla_stats.input_bytes +=
2317 bsg_reply->reply_payload_rcv_len;
2318 vha->qla_stats.input_requests++;
2319 rval = EXT_STATUS_OK;
2323 case CS_DATA_OVERRUN:
2324 ql_dbg(ql_dbg_user, vha, 0x70b1,
2325 "Command completed with data overrun thread_id=%d\n",
2327 rval = EXT_STATUS_DATA_OVERRUN;
2330 case CS_DATA_UNDERRUN:
2331 ql_dbg(ql_dbg_user, vha, 0x70b2,
2332 "Command completed with data underrun thread_id=%d\n",
2334 rval = EXT_STATUS_DATA_UNDERRUN;
2336 case CS_BIDIR_RD_OVERRUN:
2337 ql_dbg(ql_dbg_user, vha, 0x70b3,
2338 "Command completed with read data overrun thread_id=%d\n",
2340 rval = EXT_STATUS_DATA_OVERRUN;
2343 case CS_BIDIR_RD_WR_OVERRUN:
2344 ql_dbg(ql_dbg_user, vha, 0x70b4,
2345 "Command completed with read and write data overrun "
2346 "thread_id=%d\n", thread_id);
2347 rval = EXT_STATUS_DATA_OVERRUN;
2350 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2351 ql_dbg(ql_dbg_user, vha, 0x70b5,
2352 "Command completed with read data overrun and write data "
2353 "underrun thread_id=%d\n", thread_id);
2354 rval = EXT_STATUS_DATA_OVERRUN;
2357 case CS_BIDIR_RD_UNDERRUN:
2358 ql_dbg(ql_dbg_user, vha, 0x70b6,
2359 "Command completed with read data underrun "
2360 "thread_id=%d\n", thread_id);
2361 rval = EXT_STATUS_DATA_UNDERRUN;
2364 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2365 ql_dbg(ql_dbg_user, vha, 0x70b7,
2366 "Command completed with read data underrun and write data "
2367 "overrun thread_id=%d\n", thread_id);
2368 rval = EXT_STATUS_DATA_UNDERRUN;
2371 case CS_BIDIR_RD_WR_UNDERRUN:
2372 ql_dbg(ql_dbg_user, vha, 0x70b8,
2373 "Command completed with read and write data underrun "
2374 "thread_id=%d\n", thread_id);
2375 rval = EXT_STATUS_DATA_UNDERRUN;
2379 ql_dbg(ql_dbg_user, vha, 0x70b9,
2380 "Command completed with data DMA error thread_id=%d\n",
2382 rval = EXT_STATUS_DMA_ERR;
2386 ql_dbg(ql_dbg_user, vha, 0x70ba,
2387 "Command completed with timeout thread_id=%d\n",
2389 rval = EXT_STATUS_TIMEOUT;
2392 ql_dbg(ql_dbg_user, vha, 0x70bb,
2393 "Command completed with completion status=0x%x "
2394 "thread_id=%d\n", comp_status, thread_id);
2395 rval = EXT_STATUS_ERR;
2398 bsg_reply->reply_payload_rcv_len = 0;
2401 /* Return the vendor specific reply to API */
2402 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2403 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2404 /* Always return DID_OK; bsg will send the vendor-specific
2405 * response in this case only */
2406 sp->done(sp, DID_OK << 16);
2411 * qla2x00_status_entry() - Process a Status IOCB entry.
2412 * @vha: SCSI driver HA context
2413 * @rsp: response queue
2414 * @pkt: Entry pointer
2417 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2421 struct scsi_cmnd *cp;
2422 sts_entry_t *sts = pkt;
2423 struct sts_entry_24xx *sts24 = pkt;
2424 uint16_t comp_status;
2425 uint16_t scsi_status;
2427 uint8_t lscsi_status;
2429 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2431 uint8_t *rsp_info, *sense_data;
2432 struct qla_hw_data *ha = vha->hw;
2435 struct req_que *req;
2438 uint16_t state_flags = 0;
2439 uint16_t retry_delay = 0;
2441 if (IS_FWI2_CAPABLE(ha)) {
2442 comp_status = le16_to_cpu(sts24->comp_status);
2443 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2444 state_flags = le16_to_cpu(sts24->state_flags);
2446 comp_status = le16_to_cpu(sts->comp_status);
2447 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2449 handle = (uint32_t) LSW(sts->handle);
2450 que = MSW(sts->handle);
2451 req = ha->req_q_map[que];
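/*
 * The 32-bit IOCB handle is packed: the low word indexes
 * req->outstanding_cmds and the high word selects the request queue
 * in ha->req_q_map, as decoded above.
 */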
2453 /* Check for invalid queue pointer */
2455 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2456 ql_dbg(ql_dbg_io, vha, 0x3059,
2457 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2458 "que=%u.\n", sts->handle, req, que);
2462 /* Validate handle. */
2463 if (handle < req->num_outstanding_cmds) {
2464 sp = req->outstanding_cmds[handle];
2466 ql_dbg(ql_dbg_io, vha, 0x3075,
2467 "%s(%ld): Already returned command for status handle (0x%x).\n",
2468 __func__, vha->host_no, sts->handle);
2472 ql_dbg(ql_dbg_io, vha, 0x3017,
2473 "Invalid status handle, out of range (0x%x).\n",
2476 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2477 if (IS_P3P_TYPE(ha))
2478 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2480 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2481 qla2xxx_wake_dpc(vha);
2491 if (sp->cmd_type != TYPE_SRB) {
2492 req->outstanding_cmds[handle] = NULL;
2493 ql_dbg(ql_dbg_io, vha, 0x3015,
2494 "Unknown sp->cmd_type %x %p.\n",
2499 /* NVME completion. */
2500 if (sp->type == SRB_NVME_CMD) {
2501 req->outstanding_cmds[handle] = NULL;
2502 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2506 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2507 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2511 /* Task Management completion. */
2512 if (sp->type == SRB_TM_CMD) {
2513 qla24xx_tm_iocb_entry(vha, req, pkt);
2517 /* Fast path completion. */
2518 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2519 qla2x00_process_completed_request(vha, req, handle);
2524 req->outstanding_cmds[handle] = NULL;
2525 cp = GET_CMD_SP(sp);
2527 ql_dbg(ql_dbg_io, vha, 0x3018,
2528 "Command already returned (0x%x/%p).\n",
2534 lscsi_status = scsi_status & STATUS_MASK;
2536 fcport = sp->fcport;
2539 sense_len = par_sense_len = rsp_info_len = resid_len =
2541 if (IS_FWI2_CAPABLE(ha)) {
2542 if (scsi_status & SS_SENSE_LEN_VALID)
2543 sense_len = le32_to_cpu(sts24->sense_len);
2544 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2545 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2546 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2547 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2548 if (comp_status == CS_DATA_UNDERRUN)
2549 fw_resid_len = le32_to_cpu(sts24->residual_len);
2550 rsp_info = sts24->data;
2551 sense_data = sts24->data;
2552 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2553 ox_id = le16_to_cpu(sts24->ox_id);
2554 par_sense_len = sizeof(sts24->data);
2555 /* Valid values of the retry delay timer are 0x1-0xffef */
2556 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
2557 retry_delay = sts24->retry_delay & 0x3fff;
2558 ql_dbg(ql_dbg_io, sp->vha, 0x3033,
2559 "%s: scope=%#x retry_delay=%#x\n", __func__,
2560 sts24->retry_delay >> 14, retry_delay);
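/*
 * Per the masking above, bits 0-13 of retry_delay carry the delay
 * value and bits 14-15 (retry_delay >> 14) carry the scope that is
 * included in the debug message.
 */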
2563 if (scsi_status & SS_SENSE_LEN_VALID)
2564 sense_len = le16_to_cpu(sts->req_sense_length);
2565 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2566 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2567 resid_len = le32_to_cpu(sts->residual_length);
2568 rsp_info = sts->rsp_info;
2569 sense_data = sts->req_sense_data;
2570 par_sense_len = sizeof(sts->req_sense_data);
2573 /* Check for any FCP transport errors. */
2574 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2575 /* Sense data lies beyond any FCP RESPONSE data. */
2576 if (IS_FWI2_CAPABLE(ha)) {
2577 sense_data += rsp_info_len;
2578 par_sense_len -= rsp_info_len;
2580 if (rsp_info_len > 3 && rsp_info[3]) {
2581 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2582 "FCP I/O protocol failure (0x%x/0x%x).\n",
2583 rsp_info_len, rsp_info[3]);
2585 res = DID_BUS_BUSY << 16;
2590 /* Check for overrun. */
2591 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2592 scsi_status & SS_RESIDUAL_OVER)
2593 comp_status = CS_DATA_OVERRUN;
2596 * Check retry_delay_timer value if we receive a busy or
2599 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2600 lscsi_status == SAM_STAT_BUSY)
2601 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
2604 * Based on host and SCSI status, generate a status code for Linux.
2606 switch (comp_status) {
2609 if (scsi_status == 0) {
2613 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2615 scsi_set_resid(cp, resid);
2617 if (!lscsi_status &&
2618 ((unsigned)(scsi_bufflen(cp) - resid) <
2620 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2621 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2622 resid, scsi_bufflen(cp));
2624 res = DID_ERROR << 16;
2628 res = DID_OK << 16 | lscsi_status;
2630 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2631 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2632 "QUEUE FULL detected.\n");
2636 if (lscsi_status != SS_CHECK_CONDITION)
2639 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2640 if (!(scsi_status & SS_SENSE_LEN_VALID))
2643 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2647 case CS_DATA_UNDERRUN:
2648 /* Use F/W calculated residual length. */
2649 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2650 scsi_set_resid(cp, resid);
2651 if (scsi_status & SS_RESIDUAL_UNDER) {
2652 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2653 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2654 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2655 resid, scsi_bufflen(cp));
2657 res = DID_ERROR << 16 | lscsi_status;
2658 goto check_scsi_status;
2661 if (!lscsi_status &&
2662 ((unsigned)(scsi_bufflen(cp) - resid) <
2664 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2665 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2666 resid, scsi_bufflen(cp));
2668 res = DID_ERROR << 16;
2671 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2672 lscsi_status != SAM_STAT_BUSY) {
2674 * A SCSI status of TASK SET FULL or BUSY is treated as
2675 * a task that did not complete.
2678 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2679 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2680 resid, scsi_bufflen(cp));
2682 res = DID_ERROR << 16 | lscsi_status;
2683 goto check_scsi_status;
2685 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2686 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2687 scsi_status, lscsi_status);
2690 res = DID_OK << 16 | lscsi_status;
2695 * Check to see if SCSI Status is non zero. If so report SCSI
2698 if (lscsi_status != 0) {
2699 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2700 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2701 "QUEUE FULL detected.\n");
2705 if (lscsi_status != SS_CHECK_CONDITION)
2708 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2709 if (!(scsi_status & SS_SENSE_LEN_VALID))
2712 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2713 sense_len, rsp, res);
2717 case CS_PORT_LOGGED_OUT:
2718 case CS_PORT_CONFIG_CHG:
2721 case CS_PORT_UNAVAILABLE:
2726 * We are going to have the fc class block the rport
2727 * while we try to recover so instruct the mid layer
2728 * to requeue until the class decides how to handle this.
2730 res = DID_TRANSPORT_DISRUPTED << 16;
2732 if (comp_status == CS_TIMEOUT) {
2733 if (IS_FWI2_CAPABLE(ha))
2735 else if ((le16_to_cpu(sts->status_flags) &
2736 SF_LOGOUT_SENT) == 0)
2740 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2741 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2742 "Port to be marked lost on fcport=%02x%02x%02x, current "
2743 "port state=%s, comp_status=%x.\n", fcport->d_id.b.domain,
2744 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2745 port_state_str[FCS_ONLINE],
2748 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2749 qlt_schedule_sess_for_deletion(fcport);
2755 res = DID_RESET << 16;
2759 logit = qla2x00_handle_dif_error(sp, sts24);
2764 res = DID_ERROR << 16;
2766 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2769 if (state_flags & BIT_4)
2770 scmd_printk(KERN_WARNING, cp,
2771 "Unsupported device '%s' found.\n",
2772 cp->device->vendor);
2776 ql_log(ql_log_info, fcport->vha, 0x3022,
2777 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2778 comp_status, scsi_status, res, vha->host_no,
2779 cp->device->id, cp->device->lun, fcport->d_id.b24,
2780 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2781 resid_len, fw_resid_len, sp, cp);
2782 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
2783 pkt, sizeof(*sts24));
2784 res = DID_ERROR << 16;
2787 res = DID_ERROR << 16;
2793 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2794 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2795 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2796 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2797 comp_status, scsi_status, res, vha->host_no,
2798 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2799 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2800 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2801 resid_len, fw_resid_len, sp, cp);
2803 if (rsp->status_srb == NULL)
2808 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2809 * @rsp: response queue
2810 * @pkt: Entry pointer
2812 * Extended sense data.
2815 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2817 uint8_t sense_sz = 0;
2818 struct qla_hw_data *ha = rsp->hw;
2819 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2820 srb_t *sp = rsp->status_srb;
2821 struct scsi_cmnd *cp;
2825 if (!sp || !GET_CMD_SENSE_LEN(sp))
2828 sense_len = GET_CMD_SENSE_LEN(sp);
2829 sense_ptr = GET_CMD_SENSE_PTR(sp);
2831 cp = GET_CMD_SP(sp);
2833 ql_log(ql_log_warn, vha, 0x3025,
2834 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2836 rsp->status_srb = NULL;
2840 if (sense_len > sizeof(pkt->data))
2841 sense_sz = sizeof(pkt->data);
2843 sense_sz = sense_len;
2845 /* Move sense data. */
2846 if (IS_FWI2_CAPABLE(ha))
2847 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2848 memcpy(sense_ptr, pkt->data, sense_sz);
2849 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2850 sense_ptr, sense_sz);
2852 sense_len -= sense_sz;
2853 sense_ptr += sense_sz;
2855 SET_CMD_SENSE_PTR(sp, sense_ptr);
2856 SET_CMD_SENSE_LEN(sp, sense_len);
2858 /* Place command on done queue. */
2859 if (sense_len == 0) {
2860 rsp->status_srb = NULL;
2861 sp->done(sp, cp->result);
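/*
 * Sense data that does not fit in one status IOCB arrives as a chain
 * of continuation entries: each pass copies up to sizeof(pkt->data)
 * bytes, advances the saved sense pointer and shrinks the remaining
 * length, and the command completes only once the whole sense buffer
 * has been gathered (sense_len == 0).
 */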
2866 * qla2x00_error_entry() - Process an error entry.
2867 * @vha: SCSI driver HA context
2868 * @rsp: response queue
2869 * @pkt: Entry pointer
2870 * Return: 1=allow further error analysis, 0=no additional error analysis.
2873 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2876 struct qla_hw_data *ha = vha->hw;
2877 const char func[] = "ERROR-IOCB";
2878 uint16_t que = MSW(pkt->handle);
2879 struct req_que *req = NULL;
2880 int res = DID_ERROR << 16;
2882 ql_dbg(ql_dbg_async, vha, 0x502a,
2883 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
2884 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
2886 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2889 req = ha->req_q_map[que];
2891 if (pkt->entry_status & RF_BUSY)
2892 res = DID_BUS_BUSY << 16;
2894 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
2897 switch (pkt->entry_type) {
2898 case NOTIFY_ACK_TYPE:
2900 case STATUS_CONT_TYPE:
2901 case LOGINOUT_PORT_IOCB_TYPE:
2904 case ABORT_IOCB_TYPE:
2907 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2914 case ABTS_RESP_24XX:
2920 ql_log(ql_log_warn, vha, 0x5030,
2921 "Error entry - invalid handle/queue (%04x).\n", que);
2926 * qla24xx_mbx_completion() - Process mailbox command completions.
2927 * @vha: SCSI driver HA context
2928 * @mb0: Mailbox0 register
2931 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2935 uint16_t __iomem *wptr;
2936 struct qla_hw_data *ha = vha->hw;
2937 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2939 /* Read all mbox registers? */
2940 WARN_ON_ONCE(ha->mbx_count > 32);
2941 mboxes = (1ULL << ha->mbx_count) - 1;
2943 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2945 mboxes = ha->mcp->in_mb;
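/*
 * mboxes is a bitmask of mailbox registers to read back: with
 * mbx_count = 32 the default (1ULL << 32) - 1 selects all of them,
 * while a pending mailbox command narrows it to mcp->in_mb, the set
 * of registers that command actually returns.
 */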
2947 /* Load return mailbox registers. */
2948 ha->flags.mbox_int = 1;
2949 ha->mailbox_out[0] = mb0;
2951 wptr = (uint16_t __iomem *)&reg->mailbox1;
2953 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2955 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2963 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2964 struct abort_entry_24xx *pkt)
2966 const char func[] = "ABT_IOCB";
2968 struct srb_iocb *abt;
2970 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2974 abt = &sp->u.iocb_cmd;
2975 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
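/*
 * Note: for the abort IOCB the firmware evidently returns the
 * completion status in the nport_handle field, hence the read above.
 */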
2979 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
2980 struct pt_ls4_request *pkt, struct req_que *req)
2983 const char func[] = "LS4_IOCB";
2984 uint16_t comp_status;
2986 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2990 comp_status = le16_to_cpu(pkt->status);
2991 sp->done(sp, comp_status);
2995 * qla24xx_process_response_queue() - Process response queue entries.
2996 * @vha: SCSI driver HA context
2997 * @rsp: response queue
2999 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3000 struct rsp_que *rsp)
3002 struct sts_entry_24xx *pkt;
3003 struct qla_hw_data *ha = vha->hw;
3005 if (!ha->flags.fw_started)
3008 if (rsp->qpair->cpuid != smp_processor_id())
3009 qla_cpu_update(rsp->qpair, smp_processor_id());
3011 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3012 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3015 if (rsp->ring_index == rsp->length) {
3016 rsp->ring_index = 0;
3017 rsp->ring_ptr = rsp->ring;
3022 if (pkt->entry_status != 0) {
3023 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3026 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3032 switch (pkt->entry_type) {
3034 qla2x00_status_entry(vha, rsp, pkt);
3036 case STATUS_CONT_TYPE:
3037 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3039 case VP_RPT_ID_IOCB_TYPE:
3040 qla24xx_report_id_acquisition(vha,
3041 (struct vp_rpt_id_entry_24xx *)pkt);
3043 case LOGINOUT_PORT_IOCB_TYPE:
3044 qla24xx_logio_entry(vha, rsp->req,
3045 (struct logio_entry_24xx *)pkt);
3048 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3051 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3053 case ABTS_RECV_24XX:
3054 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3056 /* ensure that the ATIO queue is empty */
3057 qlt_handle_abts_recv(vha, rsp,
3061 qlt_24xx_process_atio_queue(vha, 1);
3064 case ABTS_RESP_24XX:
3067 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3069 case PT_LS4_REQUEST:
3070 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3073 case NOTIFY_ACK_TYPE:
3074 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3075 qlt_response_pkt_all_vps(vha, rsp,
3078 qla24xxx_nack_iocb_entry(vha, rsp->req,
3079 (struct nack_to_isp *)pkt);
3082 /* Do nothing in this case; this check prevents the packet
3083 * from falling into the default case
3086 case ABORT_IOCB_TYPE:
3087 qla24xx_abort_iocb_entry(vha, rsp->req,
3088 (struct abort_entry_24xx *)pkt);
3091 qla24xx_mbx_iocb_entry(vha, rsp->req,
3092 (struct mbx_24xx_entry *)pkt);
3094 case VP_CTRL_IOCB_TYPE:
3095 qla_ctrlvp_completed(vha, rsp->req,
3096 (struct vp_ctrl_entry_24xx *)pkt);
3099 /* Type Not Supported. */
3100 ql_dbg(ql_dbg_async, vha, 0x5042,
3101 "Received unknown response pkt type %x "
3102 "entry status=%x.\n",
3103 pkt->entry_type, pkt->entry_status);
3106 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3110 /* Adjust ring index */
3111 if (IS_P3P_TYPE(ha)) {
3112 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3114 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
3116 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
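/*
 * Writing ring_index to the response-queue out pointer is the
 * consumer side of the ring handshake: it tells the firmware how far
 * the driver has consumed the ring so those entries may be reused.
 * P3P (ISP82xx) parts expose the register in a different location,
 * hence the branch above.
 */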
3121 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3125 struct qla_hw_data *ha = vha->hw;
3126 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3128 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3129 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3133 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
3134 RD_REG_DWORD(&reg->iobase_addr);
3135 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3136 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3137 rval == QLA_SUCCESS; cnt--) {
3139 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3142 rval = QLA_FUNCTION_TIMEOUT;
3144 if (rval == QLA_SUCCESS)
3148 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3149 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3150 rval == QLA_SUCCESS; cnt--) {
3152 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3155 rval = QLA_FUNCTION_TIMEOUT;
3157 if (rval != QLA_SUCCESS)
3161 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
3162 ql_log(ql_log_info, vha, 0x504c,
3163 "Additional code -- 0x55AA.\n");
3166 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
3167 RD_REG_DWORD(&reg->iobase_window);
3171 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3172 * @irq: interrupt number
3173 * @dev_id: SCSI driver HA context
3175 * Called by system whenever the host adapter generates an interrupt.
3177 * Returns handled flag.
3180 qla24xx_intr_handler(int irq, void *dev_id)
3182 scsi_qla_host_t *vha;
3183 struct qla_hw_data *ha;
3184 struct device_reg_24xx __iomem *reg;
3190 struct rsp_que *rsp;
3191 unsigned long flags;
3192 bool process_atio = false;
3194 rsp = (struct rsp_que *) dev_id;
3196 ql_log(ql_log_info, NULL, 0x5059,
3197 "%s: NULL response queue pointer.\n", __func__);
3202 reg = &ha->iobase->isp24;
3205 if (unlikely(pci_channel_offline(ha->pdev)))
3208 spin_lock_irqsave(&ha->hardware_lock, flags);
3209 vha = pci_get_drvdata(ha->pdev);
3210 for (iter = 50; iter--; ) {
3211 stat = RD_REG_DWORD(&reg->host_status);
3212 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3214 if (stat & HSRX_RISC_PAUSED) {
3215 if (unlikely(pci_channel_offline(ha->pdev)))
3218 hccr = RD_REG_DWORD(&reg->hccr);
3220 ql_log(ql_log_warn, vha, 0x504b,
3221 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3224 qla2xxx_check_risc_status(vha);
3226 ha->isp_ops->fw_dump(vha, 1);
3227 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3229 } else if ((stat & HSRX_RISC_INT) == 0)
3232 switch (stat & 0xff) {
3233 case INTR_ROM_MB_SUCCESS:
3234 case INTR_ROM_MB_FAILED:
3235 case INTR_MB_SUCCESS:
3236 case INTR_MB_FAILED:
3237 qla24xx_mbx_completion(vha, MSW(stat));
3238 status |= MBX_INTERRUPT;
3241 case INTR_ASYNC_EVENT:
3243 mb[1] = RD_REG_WORD(&reg->mailbox1);
3244 mb[2] = RD_REG_WORD(&reg->mailbox2);
3245 mb[3] = RD_REG_WORD(&reg->mailbox3);
3246 qla2x00_async_event(vha, rsp, mb);
3248 case INTR_RSP_QUE_UPDATE:
3249 case INTR_RSP_QUE_UPDATE_83XX:
3250 qla24xx_process_response_queue(vha, rsp);
3252 case INTR_ATIO_QUE_UPDATE_27XX:
3253 case INTR_ATIO_QUE_UPDATE:
3254 process_atio = true;
3256 case INTR_ATIO_RSP_QUE_UPDATE:
3257 process_atio = true;
3258 qla24xx_process_response_queue(vha, rsp);
3261 ql_dbg(ql_dbg_async, vha, 0x504f,
3262 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3265 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3266 RD_REG_DWORD_RELAXED(&reg->hccr);
3267 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3270 qla2x00_handle_mbx_completion(ha, status);
3271 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3274 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3275 qlt_24xx_process_atio_queue(vha, 0);
3276 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
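/*
 * ATIO work is deliberately done here, after hardware_lock has been
 * dropped, under the dedicated tgt.atio_lock; the interrupt loop
 * above only sets process_atio, keeping target-mode processing off
 * the hot hardware_lock path.
 */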
3283 qla24xx_msix_rsp_q(int irq, void *dev_id)
3285 struct qla_hw_data *ha;
3286 struct rsp_que *rsp;
3287 struct device_reg_24xx __iomem *reg;
3288 struct scsi_qla_host *vha;
3289 unsigned long flags;
3291 rsp = (struct rsp_que *) dev_id;
3293 ql_log(ql_log_info, NULL, 0x505a,
3294 "%s: NULL response queue pointer.\n", __func__);
3298 reg = &ha->iobase->isp24;
3300 spin_lock_irqsave(&ha->hardware_lock, flags);
3302 vha = pci_get_drvdata(ha->pdev);
3303 qla24xx_process_response_queue(vha, rsp);
3304 if (!ha->flags.disable_msix_handshake) {
3305 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3306 RD_REG_DWORD_RELAXED(&reg->hccr);
3308 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3314 qla24xx_msix_default(int irq, void *dev_id)
3316 scsi_qla_host_t *vha;
3317 struct qla_hw_data *ha;
3318 struct rsp_que *rsp;
3319 struct device_reg_24xx __iomem *reg;
3324 unsigned long flags;
3325 bool process_atio = false;
3327 rsp = (struct rsp_que *) dev_id;
3329 ql_log(ql_log_info, NULL, 0x505c,
3330 "%s: NULL response queue pointer.\n", __func__);
3334 reg = &ha->iobase->isp24;
3337 spin_lock_irqsave(&ha->hardware_lock, flags);
3338 vha = pci_get_drvdata(ha->pdev);
3340 stat = RD_REG_DWORD(&reg->host_status);
3341 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3343 if (stat & HSRX_RISC_PAUSED) {
3344 if (unlikely(pci_channel_offline(ha->pdev)))
3347 hccr = RD_REG_DWORD(&reg->hccr);
3349 ql_log(ql_log_info, vha, 0x5050,
3350 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3353 qla2xxx_check_risc_status(vha);
3355 ha->isp_ops->fw_dump(vha, 1);
3356 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3358 } else if ((stat & HSRX_RISC_INT) == 0)
3361 switch (stat & 0xff) {
3362 case INTR_ROM_MB_SUCCESS:
3363 case INTR_ROM_MB_FAILED:
3364 case INTR_MB_SUCCESS:
3365 case INTR_MB_FAILED:
3366 qla24xx_mbx_completion(vha, MSW(stat));
3367 status |= MBX_INTERRUPT;
3370 case INTR_ASYNC_EVENT:
3372 mb[1] = RD_REG_WORD(&reg->mailbox1);
3373 mb[2] = RD_REG_WORD(&reg->mailbox2);
3374 mb[3] = RD_REG_WORD(&reg->mailbox3);
3375 qla2x00_async_event(vha, rsp, mb);
3377 case INTR_RSP_QUE_UPDATE:
3378 case INTR_RSP_QUE_UPDATE_83XX:
3379 qla24xx_process_response_queue(vha, rsp);
3381 case INTR_ATIO_QUE_UPDATE_27XX:
3382 case INTR_ATIO_QUE_UPDATE:
3383 process_atio = true;
3385 case INTR_ATIO_RSP_QUE_UPDATE:
3386 process_atio = true;
3387 qla24xx_process_response_queue(vha, rsp);
3390 ql_dbg(ql_dbg_async, vha, 0x5051,
3391 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3394 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3396 qla2x00_handle_mbx_completion(ha, status);
3397 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3400 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3401 qlt_24xx_process_atio_queue(vha, 0);
3402 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3409 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3411 struct qla_hw_data *ha;
3412 struct qla_qpair *qpair;
3413 struct device_reg_24xx __iomem *reg;
3414 unsigned long flags;
3418 ql_log(ql_log_info, NULL, 0x505b,
3419 "%s: NULL response queue pointer.\n", __func__);
3424 /* Clear the interrupt, if enabled, for this response queue */
3425 if (unlikely(!ha->flags.disable_msix_handshake)) {
3426 reg = &ha->iobase->isp24;
3427 spin_lock_irqsave(&ha->hardware_lock, flags);
3428 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3429 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3432 queue_work(ha->wq, &qpair->q_work);
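/*
 * For qpair vectors the hard-IRQ handler only acks the interrupt
 * (when MSI-X handshaking is enabled) and defers the actual
 * response-queue processing to ha->wq via qpair->q_work.
 */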
3437 /* Interrupt handling helpers. */
3439 struct qla_init_msix_entry {
3441 irq_handler_t handler;
3444 static const struct qla_init_msix_entry msix_entries[] = {
3445 { "default", qla24xx_msix_default },
3446 { "rsp_q", qla24xx_msix_rsp_q },
3447 { "atio_q", qla83xx_msix_atio_q },
3448 { "qpair_multiq", qla2xxx_msix_rsp_q },
3451 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3452 { "qla2xxx (default)", qla82xx_msix_default },
3453 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3457 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3460 struct qla_msix_entry *qentry;
3461 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3462 int min_vecs = QLA_BASE_VECTORS;
3463 struct irq_affinity desc = {
3464 .pre_vectors = QLA_BASE_VECTORS,
3467 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3468 IS_ATIO_MSIX_CAPABLE(ha)) {
3473 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
3474 /* user wants to control IRQ setting for target mode */
3475 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
3476 ha->msix_count, PCI_IRQ_MSIX);
3478 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
3479 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3483 ql_log(ql_log_fatal, vha, 0x00c7,
3484 "MSI-X: Failed to enable support, "
3485 "giving up -- %d/%d.\n",
3486 ha->msix_count, ret);
3488 } else if (ret < ha->msix_count) {
3489 ql_log(ql_log_info, vha, 0x00c6,
3490 "MSI-X: Using %d vectors\n", ret);
3491 ha->msix_count = ret;
3492 /* Recalculate queue values */
3493 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
3494 ha->max_req_queues = ha->msix_count - 1;
3496 /* ATIOQ needs 1 vector. That's 1 less QPair */
3497 if (QLA_TGT_MODE_ENABLED())
3498 ha->max_req_queues--;
3500 ha->max_rsp_queues = ha->max_req_queues;
3502 ha->max_qpairs = ha->max_req_queues - 1;
3503 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3504 "Adjusted max number of queue pairs: %d.\n", ha->max_qpairs);
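/*
 * Worked example of the recalculation above: if msix_count = 8 is
 * granted, max_req_queues = 8 - 1 = 7; with target mode enabled the
 * ATIO queue takes one more vector, leaving 6, and max_qpairs =
 * max_req_queues - 1 = 5.
 */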
3507 vha->irq_offset = desc.pre_vectors;
3508 ha->msix_entries = kcalloc(ha->msix_count,
3509 sizeof(struct qla_msix_entry),
3511 if (!ha->msix_entries) {
3512 ql_log(ql_log_fatal, vha, 0x00c8,
3513 "Failed to allocate memory for ha->msix_entries.\n");
3517 ha->flags.msix_enabled = 1;
3519 for (i = 0; i < ha->msix_count; i++) {
3520 qentry = &ha->msix_entries[i];
3521 qentry->vector = pci_irq_vector(ha->pdev, i);
3523 qentry->have_irq = 0;
3525 qentry->handle = NULL;
3528 /* Enable MSI-X vectors for the base queue */
3529 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3530 qentry = &ha->msix_entries[i];
3531 qentry->handle = rsp;
3533 scnprintf(qentry->name, sizeof(qentry->name),
3534 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
3535 if (IS_P3P_TYPE(ha))
3536 ret = request_irq(qentry->vector,
3537 qla82xx_msix_entries[i].handler,
3538 0, qla82xx_msix_entries[i].name, rsp);
3540 ret = request_irq(qentry->vector,
3541 msix_entries[i].handler,
3542 0, qentry->name, rsp);
3544 goto msix_register_fail;
3545 qentry->have_irq = 1;
3550 * If target mode is enabled, also request the vector for the ATIO
3553 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3554 IS_ATIO_MSIX_CAPABLE(ha)) {
3555 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3557 qentry->handle = rsp;
3558 scnprintf(qentry->name, sizeof(qentry->name),
3559 "qla2xxx%lu_%s", vha->host_no,
3560 msix_entries[QLA_ATIO_VECTOR].name);
3562 ret = request_irq(qentry->vector,
3563 msix_entries[QLA_ATIO_VECTOR].handler,
3564 0, qentry->name, rsp);
3565 qentry->have_irq = 1;
3570 ql_log(ql_log_fatal, vha, 0x00cb,
3571 "MSI-X: unable to register handler -- %x/%d.\n",
3572 qentry->vector, ret);
3573 qla2x00_free_irqs(vha);
3578 /* Enable MSI-X vector for response queue update for queue 0 */
3579 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3580 if (ha->msixbase && ha->mqiobase &&
3581 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3586 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3589 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3590 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3591 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3592 ql_dbg(ql_dbg_init, vha, 0x0055,
3593 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3594 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3600 pci_free_irq_vectors(ha->pdev);
3605 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3607 int ret = QLA_FUNCTION_FAILED;
3608 device_reg_t *reg = ha->iobase;
3609 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3611 /* If possible, enable MSI-X. */
3612 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
3613 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
3614 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
3617 if (ql2xenablemsix == 2)
3620 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3621 (ha->pdev->subsystem_device == 0x7040 ||
3622 ha->pdev->subsystem_device == 0x7041 ||
3623 ha->pdev->subsystem_device == 0x1705)) {
3624 ql_log(ql_log_warn, vha, 0x0034,
3625 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3626 ha->pdev->subsystem_vendor,
3627 ha->pdev->subsystem_device);
3631 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3632 ql_log(ql_log_warn, vha, 0x0035,
3633 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3634 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3638 ret = qla24xx_enable_msix(ha, rsp);
3640 ql_dbg(ql_dbg_init, vha, 0x0036,
3641 "MSI-X: Enabled (0x%X, 0x%X).\n",
3642 ha->chip_revision, ha->fw_attributes);
3643 goto clear_risc_ints;
3648 ql_log(ql_log_info, vha, 0x0037,
3649 "Falling back to MSI mode -- ret=%d.\n", ret);
3651 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3652 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3653 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3656 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
3658 ql_dbg(ql_dbg_init, vha, 0x0038,
3660 ha->flags.msi_enabled = 1;
3662 ql_log(ql_log_warn, vha, 0x0039,
3663 "Falling back to INTa mode -- ret=%d.\n", ret);
3666 /* Skip INTx on ISP82xx. */
3667 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3668 return QLA_FUNCTION_FAILED;
3670 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3671 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3672 QLA2XXX_DRIVER_NAME, rsp);
3674 ql_log(ql_log_warn, vha, 0x003a,
3675 "Failed to reserve interrupt %d; already in use.\n",
3678 } else if (!ha->flags.msi_enabled) {
3679 ql_dbg(ql_dbg_init, vha, 0x0125,
3680 "INTa mode: Enabled.\n");
3681 ha->flags.mr_intr_valid = 1;
3685 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
3688 spin_lock_irq(&ha->hardware_lock);
3689 WRT_REG_WORD(&reg->isp.semaphore, 0);
3690 spin_unlock_irq(&ha->hardware_lock);
3697 qla2x00_free_irqs(scsi_qla_host_t *vha)
3699 struct qla_hw_data *ha = vha->hw;
3700 struct rsp_que *rsp;
3701 struct qla_msix_entry *qentry;
3705 * We need to check that ha->rsp_q_map is valid in case we are called
3706 * from a probe failure context.
3708 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3710 rsp = ha->rsp_q_map[0];
3712 if (ha->flags.msix_enabled) {
3713 for (i = 0; i < ha->msix_count; i++) {
3714 qentry = &ha->msix_entries[i];
3715 if (qentry->have_irq) {
3716 irq_set_affinity_notifier(qentry->vector, NULL);
3717 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3720 kfree(ha->msix_entries);
3721 ha->msix_entries = NULL;
3722 ha->flags.msix_enabled = 0;
3723 ql_dbg(ql_dbg_init, vha, 0x0042,
3724 "Disabled MSI-X.\n");
3726 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3730 pci_free_irq_vectors(ha->pdev);
3733 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3734 struct qla_msix_entry *msix, int vector_type)
3736 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3737 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3740 scnprintf(msix->name, sizeof(msix->name),
3741 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3742 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3744 ql_log(ql_log_fatal, vha, 0x00e6,
3745 "MSI-X: Unable to register handler -- %x/%d.\n",
3750 msix->handle = qpair;