/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
13 static struct mb_cmd_name {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
23 static const char *mb_to_str(uint16_t cmd)
26 struct mb_cmd_name *e;
28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
36 static struct rom_cmd {
40 { MBC_EXECUTE_FIRMWARE },
41 { MBC_READ_RAM_WORD },
42 { MBC_MAILBOX_REGISTER_TEST },
43 { MBC_VERIFY_CHECKSUM },
44 { MBC_GET_FIRMWARE_VERSION },
45 { MBC_LOAD_RISC_RAM },
46 { MBC_DUMP_RISC_RAM },
47 { MBC_LOAD_RISC_RAM_EXTENDED },
48 { MBC_DUMP_RISC_RAM_EXTENDED },
49 { MBC_WRITE_RAM_WORD_EXTENDED },
50 { MBC_READ_RAM_EXTENDED },
51 { MBC_GET_RESOURCE_COUNTS },
52 { MBC_SET_FIRMWARE_OPTION },
53 { MBC_MID_INITIALIZE_FIRMWARE },
54 { MBC_GET_FIRMWARE_STATE },
55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 { MBC_GET_RETRY_COUNT },
57 { MBC_TRACE_CONTROL },
58 { MBC_INITIALIZE_MULTIQ },
59 { MBC_IOCB_COMMAND_A64 },
60 { MBC_GET_ADAPTER_LOOP_ID },
62 { MBC_GET_RNID_PARAMS },
63 { MBC_GET_SET_ZIO_THRESHOLD },
66 static int is_rom_cmd(uint16_t cmd)
71 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed success
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 */
100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
103 unsigned long flags = 0;
105 uint8_t abort_active;
107 uint16_t command = 0;
109 uint16_t __iomem *optr;
112 unsigned long wait_time;
113 struct qla_hw_data *ha = vha->hw;
114 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
118 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
120 if (ha->pdev->error_state > pci_channel_io_frozen) {
121 ql_log(ql_log_warn, vha, 0x1001,
122 "error_state is greater than pci_channel_io_frozen, "
124 return QLA_FUNCTION_TIMEOUT;
127 if (vha->device_flags & DFLG_DEV_FAILED) {
128 ql_log(ql_log_warn, vha, 0x1002,
129 "Device in failed state, exiting.\n");
130 return QLA_FUNCTION_TIMEOUT;
133 /* if PCI error, then avoid mbx processing.*/
134 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
135 test_bit(UNLOADING, &base_vha->dpc_flags)) {
136 ql_log(ql_log_warn, vha, 0xd04e,
137 "PCI error, exiting.\n");
138 return QLA_FUNCTION_TIMEOUT;
142 io_lock_on = base_vha->flags.init_done;
145 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
146 chip_reset = ha->chip_reset;
148 if (ha->flags.pci_channel_io_perm_failure) {
149 ql_log(ql_log_warn, vha, 0x1003,
150 "Perm failure on EEH timeout MBX, exiting.\n");
151 return QLA_FUNCTION_TIMEOUT;
154 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
155 /* Setting Link-Down error */
156 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
157 ql_log(ql_log_warn, vha, 0x1004,
158 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
159 return QLA_FUNCTION_TIMEOUT;
162 /* check if ISP abort is active and return cmd with timeout */
163 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
164 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
165 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
166 !is_rom_cmd(mcp->mb[0])) {
167 ql_log(ql_log_info, vha, 0x1005,
168 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
170 return QLA_FUNCTION_TIMEOUT;
173 atomic_inc(&ha->num_pend_mbx_stage1);
175 * Wait for active mailbox commands to finish by waiting at most tov
176 * seconds. This is to serialize actual issuing of mailbox cmds during
177 * non ISP abort time.
179 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
180 /* Timeout occurred. Return error. */
181 ql_log(ql_log_warn, vha, 0xd035,
182 "Cmd access timeout, cmd=0x%x, Exiting.\n",
184 atomic_dec(&ha->num_pend_mbx_stage1);
185 return QLA_FUNCTION_TIMEOUT;
187 atomic_dec(&ha->num_pend_mbx_stage1);
188 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
194 /* Save mailbox command for debug */
197 ql_dbg(ql_dbg_mbx, vha, 0x1006,
198 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
200 spin_lock_irqsave(&ha->hardware_lock, flags);
202 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
203 ha->flags.mbox_busy) {
205 spin_unlock_irqrestore(&ha->hardware_lock, flags);
208 ha->flags.mbox_busy = 1;
210 /* Load mailbox registers. */
212 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
213 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
214 optr = (uint16_t __iomem *)®->isp24.mailbox0;
216 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);
219 command = mcp->mb[0];
220 mboxes = mcp->out_mb;
222 ql_dbg(ql_dbg_mbx, vha, 0x1111,
223 "Mailbox registers (OUT):\n");
224 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
225 if (IS_QLA2200(ha) && cnt == 8)
227 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
228 if (mboxes & BIT_0) {
229 ql_dbg(ql_dbg_mbx, vha, 0x1112,
230 "mbox[%d]<-0x%04x\n", cnt, *iptr);
231 WRT_REG_WORD(optr, *iptr);
239 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
240 "I/O Address = %p.\n", optr);
242 /* Issue set host interrupt command to send cmd out. */
243 ha->flags.mbox_int = 0;
244 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
246 /* Unlock mbx registers and wait for interrupt */
247 ql_dbg(ql_dbg_mbx, vha, 0x100f,
248 "Going to unlock irq & waiting for interrupts. "
249 "jiffies=%lx.\n", jiffies);
251 /* Wait for mbx cmd completion until timeout */
252 atomic_inc(&ha->num_pend_mbx_stage2);
253 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
254 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
256 if (IS_P3P_TYPE(ha)) {
257 if (RD_REG_DWORD(®->isp82.hint) &
258 HINT_MBX_INT_PENDING) {
259 ha->flags.mbox_busy = 0;
260 spin_unlock_irqrestore(&ha->hardware_lock,
263 atomic_dec(&ha->num_pend_mbx_stage2);
264 ql_dbg(ql_dbg_mbx, vha, 0x1010,
265 "Pending mailbox timeout, exiting.\n");
266 rval = QLA_FUNCTION_TIMEOUT;
269 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
270 } else if (IS_FWI2_CAPABLE(ha))
271 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
273 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
274 spin_unlock_irqrestore(&ha->hardware_lock, flags);
277 atomic_inc(&ha->num_pend_mbx_stage3);
278 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
280 if (chip_reset != ha->chip_reset) {
281 spin_lock_irqsave(&ha->hardware_lock, flags);
282 ha->flags.mbox_busy = 0;
283 spin_unlock_irqrestore(&ha->hardware_lock,
285 atomic_dec(&ha->num_pend_mbx_stage2);
286 atomic_dec(&ha->num_pend_mbx_stage3);
290 ql_dbg(ql_dbg_mbx, vha, 0x117a,
291 "cmd=%x Timeout.\n", command);
292 spin_lock_irqsave(&ha->hardware_lock, flags);
293 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
294 spin_unlock_irqrestore(&ha->hardware_lock, flags);
296 } else if (ha->flags.purge_mbox ||
297 chip_reset != ha->chip_reset) {
298 spin_lock_irqsave(&ha->hardware_lock, flags);
299 ha->flags.mbox_busy = 0;
300 spin_unlock_irqrestore(&ha->hardware_lock, flags);
301 atomic_dec(&ha->num_pend_mbx_stage2);
302 atomic_dec(&ha->num_pend_mbx_stage3);
306 atomic_dec(&ha->num_pend_mbx_stage3);
308 if (time_after(jiffies, wait_time + 5 * HZ))
309 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
310 command, jiffies_to_msecs(jiffies - wait_time));
312 ql_dbg(ql_dbg_mbx, vha, 0x1011,
313 "Cmd=%x Polling Mode.\n", command);
315 if (IS_P3P_TYPE(ha)) {
316 if (RD_REG_DWORD(®->isp82.hint) &
317 HINT_MBX_INT_PENDING) {
318 ha->flags.mbox_busy = 0;
319 spin_unlock_irqrestore(&ha->hardware_lock,
321 atomic_dec(&ha->num_pend_mbx_stage2);
322 ql_dbg(ql_dbg_mbx, vha, 0x1012,
323 "Pending mailbox timeout, exiting.\n");
324 rval = QLA_FUNCTION_TIMEOUT;
327 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
328 } else if (IS_FWI2_CAPABLE(ha))
329 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
331 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
332 spin_unlock_irqrestore(&ha->hardware_lock, flags);
334 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
335 while (!ha->flags.mbox_int) {
336 if (ha->flags.purge_mbox ||
337 chip_reset != ha->chip_reset) {
338 spin_lock_irqsave(&ha->hardware_lock, flags);
339 ha->flags.mbox_busy = 0;
340 spin_unlock_irqrestore(&ha->hardware_lock,
342 atomic_dec(&ha->num_pend_mbx_stage2);
347 if (time_after(jiffies, wait_time))
351 * Check if it's UNLOADING, cause we cannot poll in
352 * this case, or else a NULL pointer dereference
355 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
356 return QLA_FUNCTION_TIMEOUT;
358 /* Check for pending interrupts. */
359 qla2x00_poll(ha->rsp_q_map[0]);
361 if (!ha->flags.mbox_int &&
363 command == MBC_LOAD_RISC_RAM_EXTENDED))
366 ql_dbg(ql_dbg_mbx, vha, 0x1013,
368 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
370 atomic_dec(&ha->num_pend_mbx_stage2);
372 /* Check whether we timed out */
373 if (ha->flags.mbox_int) {
376 ql_dbg(ql_dbg_mbx, vha, 0x1014,
377 "Cmd=%x completed.\n", command);
379 /* Got interrupt. Clear the flag. */
380 ha->flags.mbox_int = 0;
381 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
383 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
384 spin_lock_irqsave(&ha->hardware_lock, flags);
385 ha->flags.mbox_busy = 0;
386 spin_unlock_irqrestore(&ha->hardware_lock, flags);
388 /* Setting Link-Down error */
389 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
391 rval = QLA_FUNCTION_FAILED;
392 ql_log(ql_log_warn, vha, 0xd048,
393 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
397 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
398 rval = QLA_FUNCTION_FAILED;
400 /* Load return mailbox registers. */
402 iptr = (uint16_t *)&ha->mailbox_out[0];
405 ql_dbg(ql_dbg_mbx, vha, 0x1113,
406 "Mailbox registers (IN):\n");
407 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
408 if (mboxes & BIT_0) {
410 ql_dbg(ql_dbg_mbx, vha, 0x1114,
411 "mbox[%d]->0x%04x\n", cnt, *iptr2);
421 uint32_t ictrl, host_status, hccr;
424 if (IS_FWI2_CAPABLE(ha)) {
425 mb[0] = RD_REG_WORD(®->isp24.mailbox0);
426 mb[1] = RD_REG_WORD(®->isp24.mailbox1);
427 mb[2] = RD_REG_WORD(®->isp24.mailbox2);
428 mb[3] = RD_REG_WORD(®->isp24.mailbox3);
429 mb[7] = RD_REG_WORD(®->isp24.mailbox7);
430 ictrl = RD_REG_DWORD(®->isp24.ictrl);
431 host_status = RD_REG_DWORD(®->isp24.host_status);
432 hccr = RD_REG_DWORD(®->isp24.hccr);
434 ql_log(ql_log_warn, vha, 0xd04c,
435 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
436 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
437 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
438 mb[7], host_status, hccr);
441 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
442 ictrl = RD_REG_WORD(®->isp.ictrl);
443 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
444 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
445 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
447 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
449 /* Capture FW dump only, if PCI device active */
450 if (!pci_channel_offline(vha->hw->pdev)) {
451 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
452 if (w == 0xffff || ictrl == 0xffffffff ||
453 (chip_reset != ha->chip_reset)) {
454 /* This is special case if there is unload
455 * of driver happening and if PCI device go
456 * into bad state due to PCI error condition
457 * then only PCI ERR flag would be set.
458 * we will do premature exit for above case.
460 spin_lock_irqsave(&ha->hardware_lock, flags);
461 ha->flags.mbox_busy = 0;
462 spin_unlock_irqrestore(&ha->hardware_lock,
464 rval = QLA_FUNCTION_TIMEOUT;
468 /* Attempt to capture firmware dump for further
469 * anallysis of the current formware state. we do not
470 * need to do this if we are intentionally generating
473 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
474 ha->isp_ops->fw_dump(vha, 0);
475 rval = QLA_FUNCTION_TIMEOUT;
478 spin_lock_irqsave(&ha->hardware_lock, flags);
479 ha->flags.mbox_busy = 0;
480 spin_unlock_irqrestore(&ha->hardware_lock, flags);
485 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
486 ql_dbg(ql_dbg_mbx, vha, 0x101a,
487 "Checking for additional resp interrupt.\n");
489 /* polling mode for non isp_abort commands. */
490 qla2x00_poll(ha->rsp_q_map[0]);
493 if (rval == QLA_FUNCTION_TIMEOUT &&
494 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
495 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
496 ha->flags.eeh_busy) {
497 /* not in dpc. schedule it for dpc to take over. */
498 ql_dbg(ql_dbg_mbx, vha, 0x101b,
499 "Timeout, schedule isp_abort_needed.\n");
501 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
502 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
503 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
504 if (IS_QLA82XX(ha)) {
505 ql_dbg(ql_dbg_mbx, vha, 0x112a,
506 "disabling pause transmit on port "
509 QLA82XX_CRB_NIU + 0x98,
510 CRB_NIU_XG_PAUSE_CTL_P0|
511 CRB_NIU_XG_PAUSE_CTL_P1);
513 ql_log(ql_log_info, base_vha, 0x101c,
514 "Mailbox cmd timeout occurred, cmd=0x%x, "
515 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
516 "abort.\n", command, mcp->mb[0],
518 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
519 qla2xxx_wake_dpc(vha);
521 } else if (current == ha->dpc_thread) {
522 /* call abort directly since we are in the DPC thread */
523 ql_dbg(ql_dbg_mbx, vha, 0x101d,
524 "Timeout, calling abort_isp.\n");
526 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
527 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
528 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
529 if (IS_QLA82XX(ha)) {
530 ql_dbg(ql_dbg_mbx, vha, 0x112b,
531 "disabling pause transmit on port "
534 QLA82XX_CRB_NIU + 0x98,
535 CRB_NIU_XG_PAUSE_CTL_P0|
536 CRB_NIU_XG_PAUSE_CTL_P1);
538 ql_log(ql_log_info, base_vha, 0x101e,
539 "Mailbox cmd timeout occurred, cmd=0x%x, "
540 "mb[0]=0x%x. Scheduling ISP abort ",
541 command, mcp->mb[0]);
542 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
543 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
544 /* Allow next mbx cmd to come in. */
545 complete(&ha->mbx_cmd_comp);
546 if (ha->isp_ops->abort_isp(vha)) {
547 /* Failed. retry later. */
548 set_bit(ISP_ABORT_NEEDED,
551 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
552 ql_dbg(ql_dbg_mbx, vha, 0x101f,
553 "Finished abort_isp.\n");
560 /* Allow next mbx cmd to come in. */
561 complete(&ha->mbx_cmd_comp);
564 if (rval == QLA_ABORTED) {
565 ql_log(ql_log_info, vha, 0xd035,
566 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
569 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
570 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
571 dev_name(&ha->pdev->dev), 0x1020+0x800,
575 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
576 if (mboxes & BIT_0) {
577 printk(" mb[%u]=%x", i, mcp->mb[i]);
580 pr_warn(" cmd=%x ****\n", command);
582 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
583 ql_dbg(ql_dbg_mbx, vha, 0x1198,
584 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
585 RD_REG_DWORD(®->isp24.host_status),
586 RD_REG_DWORD(®->isp24.ictrl),
587 RD_REG_DWORD(®->isp24.istatus));
589 ql_dbg(ql_dbg_mbx, vha, 0x1206,
590 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
591 RD_REG_WORD(®->isp.ctrl_status),
592 RD_REG_WORD(®->isp.ictrl),
593 RD_REG_WORD(®->isp.istatus));
596 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
603 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
604 uint32_t risc_code_size)
607 struct qla_hw_data *ha = vha->hw;
609 mbx_cmd_t *mcp = &mc;
611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
612 "Entered %s.\n", __func__);
614 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
615 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
616 mcp->mb[8] = MSW(risc_addr);
617 mcp->out_mb = MBX_8|MBX_0;
619 mcp->mb[0] = MBC_LOAD_RISC_RAM;
622 mcp->mb[1] = LSW(risc_addr);
623 mcp->mb[2] = MSW(req_dma);
624 mcp->mb[3] = LSW(req_dma);
625 mcp->mb[6] = MSW(MSD(req_dma));
626 mcp->mb[7] = LSW(MSD(req_dma));
627 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
628 if (IS_FWI2_CAPABLE(ha)) {
629 mcp->mb[4] = MSW(risc_code_size);
630 mcp->mb[5] = LSW(risc_code_size);
631 mcp->out_mb |= MBX_5|MBX_4;
633 mcp->mb[4] = LSW(risc_code_size);
634 mcp->out_mb |= MBX_4;
638 mcp->tov = MBX_TOV_SECONDS;
640 rval = qla2x00_mailbox_command(vha, mcp);
642 if (rval != QLA_SUCCESS) {
643 ql_dbg(ql_dbg_mbx, vha, 0x1023,
644 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
647 "Done %s.\n", __func__);
653 #define EXTENDED_BB_CREDITS BIT_0
654 #define NVME_ENABLE_FLAG BIT_3
655 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
657 uint16_t mb4 = BIT_0;
659 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
660 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
665 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
667 uint16_t mb4 = BIT_0;
669 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
670 struct nvram_81xx *nv = ha->nvram;
672 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
694 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
697 struct qla_hw_data *ha = vha->hw;
699 mbx_cmd_t *mcp = &mc;
701 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
702 "Entered %s.\n", __func__);
704 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
707 if (IS_FWI2_CAPABLE(ha)) {
708 mcp->mb[1] = MSW(risc_addr);
709 mcp->mb[2] = LSW(risc_addr);
712 ha->flags.using_lr_setting = 0;
713 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
715 if (ql2xautodetectsfp) {
716 if (ha->flags.detected_lr_sfp) {
718 qla25xx_set_sfp_lr_dist(ha);
719 ha->flags.using_lr_setting = 1;
722 struct nvram_81xx *nv = ha->nvram;
723 /* set LR distance if specified in nvram */
724 if (nv->enhanced_features &
725 NEF_LR_DIST_ENABLE) {
727 qla25xx_set_nvr_lr_dist(ha);
728 ha->flags.using_lr_setting = 1;
733 if (ql2xnvmeenable && IS_QLA27XX(ha))
734 mcp->mb[4] |= NVME_ENABLE_FLAG;
736 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
737 struct nvram_81xx *nv = ha->nvram;
738 /* set minimum speed if specified in nvram */
739 if (nv->min_link_speed >= 2 &&
740 nv->min_link_speed <= 5) {
742 mcp->mb[11] = nv->min_link_speed;
743 mcp->out_mb |= MBX_11;
745 vha->min_link_speed_feat = nv->min_link_speed;
749 if (ha->flags.exlogins_enabled)
750 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
752 if (ha->flags.exchoffld_enabled)
753 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
755 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
756 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
758 mcp->mb[1] = LSW(risc_addr);
759 mcp->out_mb |= MBX_1;
760 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
762 mcp->out_mb |= MBX_2;
766 mcp->tov = MBX_TOV_SECONDS;
768 rval = qla2x00_mailbox_command(vha, mcp);
770 if (rval != QLA_SUCCESS) {
771 ql_dbg(ql_dbg_mbx, vha, 0x1026,
772 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
774 if (IS_FWI2_CAPABLE(ha)) {
775 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
776 ql_dbg(ql_dbg_mbx, vha, 0x119a,
777 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
778 ql_dbg(ql_dbg_mbx, vha, 0x1027,
779 "exchanges=%x.\n", mcp->mb[1]);
780 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
781 ha->max_speed_sup = mcp->mb[2] & BIT_0;
782 ql_dbg(ql_dbg_mbx, vha, 0x119b,
783 "Maximum speed supported=%s.\n",
784 ha->max_speed_sup ? "32Gps" : "16Gps");
785 if (vha->min_link_speed_feat) {
786 ha->min_link_speed = mcp->mb[5];
787 ql_dbg(ql_dbg_mbx, vha, 0x119c,
788 "Minimum speed set=%s.\n",
789 mcp->mb[5] == 5 ? "32Gps" :
790 mcp->mb[5] == 4 ? "16Gps" :
791 mcp->mb[5] == 3 ? "8Gps" :
792 mcp->mb[5] == 2 ? "4Gps" :
797 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
805 * qla_get_exlogin_status
806 * Get extended login status
807 * uses the memory offload control/status Mailbox
810 * ha: adapter state pointer.
811 * fwopt: firmware options
814 * qla2x00 local function status
819 #define FETCH_XLOGINS_STAT 0x8
821 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
822 uint16_t *ex_logins_cnt)
826 mbx_cmd_t *mcp = &mc;
828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
829 "Entered %s\n", __func__);
831 memset(mcp->mb, 0 , sizeof(mcp->mb));
832 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
833 mcp->mb[1] = FETCH_XLOGINS_STAT;
834 mcp->out_mb = MBX_1|MBX_0;
835 mcp->in_mb = MBX_10|MBX_4|MBX_0;
836 mcp->tov = MBX_TOV_SECONDS;
839 rval = qla2x00_mailbox_command(vha, mcp);
840 if (rval != QLA_SUCCESS) {
841 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
843 *buf_sz = mcp->mb[4];
844 *ex_logins_cnt = mcp->mb[10];
846 ql_log(ql_log_info, vha, 0x1190,
847 "buffer size 0x%x, exchange login count=%d\n",
848 mcp->mb[4], mcp->mb[10]);
850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
851 "Done %s.\n", __func__);
858 * qla_set_exlogin_mem_cfg
859 * set extended login memory configuration
860 * Mbx needs to be issues before init_cb is set
863 * ha: adapter state pointer.
864 * buffer: buffer pointer
865 * phys_addr: physical address of buffer
866 * size: size of buffer
867 * TARGET_QUEUE_LOCK must be released
868 * ADAPTER_STATE_LOCK must be release
871 * qla2x00 local funxtion status code.
876 #define CONFIG_XLOGINS_MEM 0x3
878 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
882 mbx_cmd_t *mcp = &mc;
883 struct qla_hw_data *ha = vha->hw;
885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
886 "Entered %s.\n", __func__);
888 memset(mcp->mb, 0 , sizeof(mcp->mb));
889 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
890 mcp->mb[1] = CONFIG_XLOGINS_MEM;
891 mcp->mb[2] = MSW(phys_addr);
892 mcp->mb[3] = LSW(phys_addr);
893 mcp->mb[6] = MSW(MSD(phys_addr));
894 mcp->mb[7] = LSW(MSD(phys_addr));
895 mcp->mb[8] = MSW(ha->exlogin_size);
896 mcp->mb[9] = LSW(ha->exlogin_size);
897 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
898 mcp->in_mb = MBX_11|MBX_0;
899 mcp->tov = MBX_TOV_SECONDS;
901 rval = qla2x00_mailbox_command(vha, mcp);
902 if (rval != QLA_SUCCESS) {
904 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
907 "Done %s.\n", __func__);
914 * qla_get_exchoffld_status
915 * Get exchange offload status
916 * uses the memory offload control/status Mailbox
919 * ha: adapter state pointer.
920 * fwopt: firmware options
923 * qla2x00 local function status
928 #define FETCH_XCHOFFLD_STAT 0x2
930 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
931 uint16_t *ex_logins_cnt)
935 mbx_cmd_t *mcp = &mc;
937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
938 "Entered %s\n", __func__);
940 memset(mcp->mb, 0 , sizeof(mcp->mb));
941 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
942 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
943 mcp->out_mb = MBX_1|MBX_0;
944 mcp->in_mb = MBX_10|MBX_4|MBX_0;
945 mcp->tov = MBX_TOV_SECONDS;
948 rval = qla2x00_mailbox_command(vha, mcp);
949 if (rval != QLA_SUCCESS) {
950 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
952 *buf_sz = mcp->mb[4];
953 *ex_logins_cnt = mcp->mb[10];
955 ql_log(ql_log_info, vha, 0x118e,
956 "buffer size 0x%x, exchange offload count=%d\n",
957 mcp->mb[4], mcp->mb[10]);
959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
960 "Done %s.\n", __func__);
967 * qla_set_exchoffld_mem_cfg
968 * Set exchange offload memory configuration
969 * Mbx needs to be issues before init_cb is set
972 * ha: adapter state pointer.
973 * buffer: buffer pointer
974 * phys_addr: physical address of buffer
975 * size: size of buffer
976 * TARGET_QUEUE_LOCK must be released
977 * ADAPTER_STATE_LOCK must be release
980 * qla2x00 local funxtion status code.
985 #define CONFIG_XCHOFFLD_MEM 0x3
987 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
991 mbx_cmd_t *mcp = &mc;
992 struct qla_hw_data *ha = vha->hw;
994 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
995 "Entered %s.\n", __func__);
997 memset(mcp->mb, 0 , sizeof(mcp->mb));
998 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
999 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1000 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1001 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1002 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1003 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1004 mcp->mb[8] = MSW(ha->exchoffld_size);
1005 mcp->mb[9] = LSW(ha->exchoffld_size);
1006 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1007 mcp->in_mb = MBX_11|MBX_0;
1008 mcp->tov = MBX_TOV_SECONDS;
1010 rval = qla2x00_mailbox_command(vha, mcp);
1011 if (rval != QLA_SUCCESS) {
1013 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1016 "Done %s.\n", __func__);
1023 * qla2x00_get_fw_version
1024 * Get firmware version.
1027 * ha: adapter state pointer.
1028 * major: pointer for major number.
1029 * minor: pointer for minor number.
1030 * subminor: pointer for subminor number.
1033 * qla2x00 local function return status code.
1039 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1043 mbx_cmd_t *mcp = &mc;
1044 struct qla_hw_data *ha = vha->hw;
1046 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1047 "Entered %s.\n", __func__);
1049 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1050 mcp->out_mb = MBX_0;
1051 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1052 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1053 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1054 if (IS_FWI2_CAPABLE(ha))
1055 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1058 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1059 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
1062 mcp->tov = MBX_TOV_SECONDS;
1063 rval = qla2x00_mailbox_command(vha, mcp);
1064 if (rval != QLA_SUCCESS)
1067 /* Return mailbox data. */
1068 ha->fw_major_version = mcp->mb[1];
1069 ha->fw_minor_version = mcp->mb[2];
1070 ha->fw_subminor_version = mcp->mb[3];
1071 ha->fw_attributes = mcp->mb[6];
1072 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1073 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1075 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1077 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1078 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1079 ha->mpi_version[1] = mcp->mb[11] >> 8;
1080 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1081 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1082 ha->phy_version[0] = mcp->mb[8] & 0xff;
1083 ha->phy_version[1] = mcp->mb[9] >> 8;
1084 ha->phy_version[2] = mcp->mb[9] & 0xff;
1087 if (IS_FWI2_CAPABLE(ha)) {
1088 ha->fw_attributes_h = mcp->mb[15];
1089 ha->fw_attributes_ext[0] = mcp->mb[16];
1090 ha->fw_attributes_ext[1] = mcp->mb[17];
1091 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1092 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1093 __func__, mcp->mb[15], mcp->mb[6]);
1094 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1095 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1096 __func__, mcp->mb[17], mcp->mb[16]);
1098 if (ha->fw_attributes_h & 0x4)
1099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1100 "%s: Firmware supports Extended Login 0x%x\n",
1101 __func__, ha->fw_attributes_h);
1103 if (ha->fw_attributes_h & 0x8)
1104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1105 "%s: Firmware supports Exchange Offload 0x%x\n",
1106 __func__, ha->fw_attributes_h);
1109 * FW supports nvme and driver load parameter requested nvme.
1110 * BIT 26 of fw_attributes indicates NVMe support.
1112 if ((ha->fw_attributes_h &
1113 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1115 vha->flags.nvme_enabled = 1;
1116 ql_log(ql_log_info, vha, 0xd302,
1117 "%s: FC-NVMe is Enabled (0x%x)\n",
1118 __func__, ha->fw_attributes_h);
1122 if (IS_QLA27XX(ha)) {
1123 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1124 ha->mpi_version[1] = mcp->mb[11] >> 8;
1125 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1126 ha->pep_version[0] = mcp->mb[13] & 0xff;
1127 ha->pep_version[1] = mcp->mb[14] >> 8;
1128 ha->pep_version[2] = mcp->mb[14] & 0xff;
1129 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1130 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1131 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1132 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1136 if (rval != QLA_SUCCESS) {
1138 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1142 "Done %s.\n", __func__);
1148 * qla2x00_get_fw_options
1149 * Set firmware options.
1152 * ha = adapter block pointer.
1153 * fwopt = pointer for firmware options.
1156 * qla2x00 local function return status code.
1162 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1166 mbx_cmd_t *mcp = &mc;
1168 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1169 "Entered %s.\n", __func__);
1171 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1172 mcp->out_mb = MBX_0;
1173 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1174 mcp->tov = MBX_TOV_SECONDS;
1176 rval = qla2x00_mailbox_command(vha, mcp);
1178 if (rval != QLA_SUCCESS) {
1180 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1182 fwopts[0] = mcp->mb[0];
1183 fwopts[1] = mcp->mb[1];
1184 fwopts[2] = mcp->mb[2];
1185 fwopts[3] = mcp->mb[3];
1187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1188 "Done %s.\n", __func__);
1196 * qla2x00_set_fw_options
1197 * Set firmware options.
1200 * ha = adapter block pointer.
1201 * fwopt = pointer for firmware options.
1204 * qla2x00 local function return status code.
1210 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1214 mbx_cmd_t *mcp = &mc;
1216 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1217 "Entered %s.\n", __func__);
1219 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1220 mcp->mb[1] = fwopts[1];
1221 mcp->mb[2] = fwopts[2];
1222 mcp->mb[3] = fwopts[3];
1223 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1225 if (IS_FWI2_CAPABLE(vha->hw)) {
1226 mcp->in_mb |= MBX_1;
1227 mcp->mb[10] = fwopts[10];
1228 mcp->out_mb |= MBX_10;
1230 mcp->mb[10] = fwopts[10];
1231 mcp->mb[11] = fwopts[11];
1232 mcp->mb[12] = 0; /* Undocumented, but used */
1233 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1235 mcp->tov = MBX_TOV_SECONDS;
1237 rval = qla2x00_mailbox_command(vha, mcp);
1239 fwopts[0] = mcp->mb[0];
1241 if (rval != QLA_SUCCESS) {
1243 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1244 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1247 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1248 "Done %s.\n", __func__);
1255 * qla2x00_mbx_reg_test
1256 * Mailbox register wrap test.
1259 * ha = adapter block pointer.
1260 * TARGET_QUEUE_LOCK must be released.
1261 * ADAPTER_STATE_LOCK must be released.
1264 * qla2x00 local function return status code.
1270 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1274 mbx_cmd_t *mcp = &mc;
1276 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1277 "Entered %s.\n", __func__);
1279 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1280 mcp->mb[1] = 0xAAAA;
1281 mcp->mb[2] = 0x5555;
1282 mcp->mb[3] = 0xAA55;
1283 mcp->mb[4] = 0x55AA;
1284 mcp->mb[5] = 0xA5A5;
1285 mcp->mb[6] = 0x5A5A;
1286 mcp->mb[7] = 0x2525;
1287 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1288 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1289 mcp->tov = MBX_TOV_SECONDS;
1291 rval = qla2x00_mailbox_command(vha, mcp);
1293 if (rval == QLA_SUCCESS) {
1294 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1295 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1296 rval = QLA_FUNCTION_FAILED;
1297 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1298 mcp->mb[7] != 0x2525)
1299 rval = QLA_FUNCTION_FAILED;
1302 if (rval != QLA_SUCCESS) {
1304 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1307 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1308 "Done %s.\n", __func__);
1315 * qla2x00_verify_checksum
1316 * Verify firmware checksum.
1319 * ha = adapter block pointer.
1320 * TARGET_QUEUE_LOCK must be released.
1321 * ADAPTER_STATE_LOCK must be released.
1324 * qla2x00 local function return status code.
/*
 * Ask the firmware to verify its checksum starting at risc_addr.
 * FWI2-capable (24xx+) parts take the full 32-bit address split across
 * mb1 (MSW) / mb2 (LSW); older ISPs take only the low word in mb1.
 */
1330 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1334 mbx_cmd_t *mcp = &mc;
1336 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1337 "Entered %s.\n", __func__);
1339 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1340 mcp->out_mb = MBX_0;
1342 if (IS_FWI2_CAPABLE(vha->hw)) {
/* 32-bit RISC address: high word in mb1, low word in mb2. */
1343 mcp->mb[1] = MSW(risc_addr);
1344 mcp->mb[2] = LSW(risc_addr);
1345 mcp->out_mb |= MBX_2|MBX_1;
1346 mcp->in_mb |= MBX_2|MBX_1;
/* Legacy ISPs: 16-bit address only. */
1348 mcp->mb[1] = LSW(risc_addr);
1349 mcp->out_mb |= MBX_1;
1350 mcp->in_mb |= MBX_1;
1353 mcp->tov = MBX_TOV_SECONDS;
1355 rval = qla2x00_mailbox_command(vha, mcp);
1357 if (rval != QLA_SUCCESS) {
/* On failure log the checksum value the firmware reported. */
1358 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1359 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1360 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1362 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1363 "Done %s.\n", __func__);
1370 * qla2x00_issue_iocb
1371 * Issue IOCB using mailbox command
1374 * ha = adapter state pointer.
1375 * buffer = buffer pointer.
1376 * phys_addr = physical address of buffer.
1377 * size = size of buffer.
1378 * TARGET_QUEUE_LOCK must be released.
1379 * ADAPTER_STATE_LOCK must be released.
1382 * qla2x00 local function return status code.
/*
 * Execute a single prebuilt IOCB through the MBC_IOCB_COMMAND_A64
 * mailbox command.  buffer/phys_addr describe one DMA-mapped IOCB;
 * the caller supplies the timeout (tov) in seconds.
 */
1388 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1389 dma_addr_t phys_addr, size_t size, uint32_t tov)
1393 mbx_cmd_t *mcp = &mc;
1395 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1396 "Entered %s.\n", __func__);
/* 64-bit DMA address: low dword in mb2/mb3, high dword in mb6/mb7. */
1398 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1400 mcp->mb[2] = MSW(phys_addr);
1401 mcp->mb[3] = LSW(phys_addr);
1402 mcp->mb[6] = MSW(MSD(phys_addr));
1403 mcp->mb[7] = LSW(MSD(phys_addr));
1404 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1405 mcp->in_mb = MBX_2|MBX_0;
1408 rval = qla2x00_mailbox_command(vha, mcp);
1410 if (rval != QLA_SUCCESS) {
1412 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1414 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
/* Success: scrub reserved bits from the returned status entry so
 * callers only see architecturally-defined error-status flags. */
1416 /* Mask reserved bits. */
1417 sts_entry->entry_status &=
1418 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1420 "Done %s.\n", __func__);
/*
 * Convenience wrapper around qla2x00_issue_iocb_timeout() using the
 * driver's default mailbox timeout (default value not visible in this
 * chunk — elided by extraction).
 */
1427 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1430 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1435 * qla2x00_abort_command
1436 * Abort command aborts a specified IOCB.
1439 * ha = adapter block pointer.
1440 * sp = SB structure pointer.
1443 * qla2x00 local function return status code.
/*
 * Abort an outstanding command (sp) via MBC_ABORT_COMMAND.  Locates
 * sp's handle in the request queue's outstanding-command table under
 * the hardware lock, then issues the abort with loop id, handle and
 * LUN in the mailbox registers.
 */
1449 qla2x00_abort_command(srb_t *sp)
1451 unsigned long flags = 0;
1453 uint32_t handle = 0;
1455 mbx_cmd_t *mcp = &mc;
1456 fc_port_t *fcport = sp->fcport;
1457 scsi_qla_host_t *vha = fcport->vha;
1458 struct qla_hw_data *ha = vha->hw;
1459 struct req_que *req;
1460 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1462 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1463 "Entered %s.\n", __func__);
/* Prefer the qpair's request queue when multiqueue is in use. */
1465 if (vha->flags.qpairs_available && sp->qpair)
1466 req = sp->qpair->req;
/* Scan outstanding commands for sp's handle (slot 0 is unused). */
1470 spin_lock_irqsave(&ha->hardware_lock, flags);
1471 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1472 if (req->outstanding_cmds[handle] == sp)
1475 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1477 if (handle == req->num_outstanding_cmds) {
1478 /* command not found */
1479 return QLA_FUNCTION_FAILED;
1482 mcp->mb[0] = MBC_ABORT_COMMAND;
/* Extended-ID adapters carry the loop id in the full word; legacy
 * adapters pack it into the high byte of mb1. */
1483 if (HAS_EXTENDED_IDS(ha))
1484 mcp->mb[1] = fcport->loop_id;
1486 mcp->mb[1] = fcport->loop_id << 8;
1487 mcp->mb[2] = (uint16_t)handle;
1488 mcp->mb[3] = (uint16_t)(handle >> 16);
1489 mcp->mb[6] = (uint16_t)cmd->device->lun;
1490 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1492 mcp->tov = MBX_TOV_SECONDS;
1494 rval = qla2x00_mailbox_command(vha, mcp);
1496 if (rval != QLA_SUCCESS) {
1497 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1499 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1500 "Done %s.\n", __func__);
/*
 * Issue an MBC_ABORT_TARGET task-management command for fcport, then
 * post a marker IOCB so the firmware resynchronizes the exchange
 * state for that target.  (vha assignment from fcport is in a line
 * elided by extraction.)
 */
1507 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1511 mbx_cmd_t *mcp = &mc;
1512 scsi_qla_host_t *vha;
1513 struct req_que *req;
1514 struct rsp_que *rsp;
1518 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1519 "Entered %s.\n", __func__);
1521 req = vha->hw->req_q_map[0];
1523 mcp->mb[0] = MBC_ABORT_TARGET;
1524 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
/* Loop-id placement depends on extended-ID support (see abort_command). */
1525 if (HAS_EXTENDED_IDS(vha->hw)) {
1526 mcp->mb[1] = fcport->loop_id;
1528 mcp->out_mb |= MBX_10;
1530 mcp->mb[1] = fcport->loop_id << 8;
1532 mcp->mb[2] = vha->hw->loop_reset_delay;
1533 mcp->mb[9] = vha->vp_idx;
1536 mcp->tov = MBX_TOV_SECONDS;
1538 rval = qla2x00_mailbox_command(vha, mcp);
1539 if (rval != QLA_SUCCESS) {
1540 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1541 "Failed=%x.\n", rval);
/* Marker failure is reported separately from the abort itself. */
1544 /* Issue marker IOCB. */
1545 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1547 if (rval2 != QLA_SUCCESS) {
1548 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1549 "Failed to issue marker IOCB (%x).\n", rval2);
1551 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1552 "Done %s.\n", __func__);
/*
 * Issue an MBC_LUN_RESET task-management command for LUN l on fcport,
 * then post a LUN-scoped marker IOCB.  Mirrors qla2x00_abort_target()
 * but targets a single LUN (mb2) rather than the whole target.
 */
1559 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1563 mbx_cmd_t *mcp = &mc;
1564 scsi_qla_host_t *vha;
1565 struct req_que *req;
1566 struct rsp_que *rsp;
1570 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1571 "Entered %s.\n", __func__);
1573 req = vha->hw->req_q_map[0];
1575 mcp->mb[0] = MBC_LUN_RESET;
1576 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1577 if (HAS_EXTENDED_IDS(vha->hw))
1578 mcp->mb[1] = fcport->loop_id;
1580 mcp->mb[1] = fcport->loop_id << 8;
/* NOTE(review): only the low 32 bits of the 64-bit LUN reach mb2;
 * presumably callers pass flat LUNs — confirm against callers. */
1581 mcp->mb[2] = (u32)l;
1583 mcp->mb[9] = vha->vp_idx;
1586 mcp->tov = MBX_TOV_SECONDS;
1588 rval = qla2x00_mailbox_command(vha, mcp);
1589 if (rval != QLA_SUCCESS) {
1590 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1593 /* Issue marker IOCB. */
1594 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1596 if (rval2 != QLA_SUCCESS) {
1597 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1598 "Failed to issue marker IOCB (%x).\n", rval2);
1600 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1601 "Done %s.\n", __func__);
1608 * qla2x00_get_adapter_id
1609 * Get adapter ID and topology.
1612 * ha = adapter block pointer.
1613 * id = pointer for loop ID.
1614 * al_pa = pointer for AL_PA.
1615 * area = pointer for area.
1616 * domain = pointer for domain.
1617 * top = pointer for topology.
1618 * TARGET_QUEUE_LOCK must be released.
1619 * ADAPTER_STATE_LOCK must be released.
1622 * qla2x00 local function return status code.
/*
 * Retrieve the adapter's loop id, AL_PA, area, domain, topology and
 * switch capabilities via MBC_GET_ADAPTER_LOOP_ID.  On CNA parts it
 * also captures FCoE VLAN/FCF/VN-port MAC data; on FA-WWN capable
 * parts it may adopt a fabric-assigned WWPN; on 27xx it reads BBCR.
 */
1628 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1629 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1633 mbx_cmd_t *mcp = &mc;
1635 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1636 "Entered %s.\n", __func__);
1638 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1639 mcp->mb[9] = vha->vp_idx;
1640 mcp->out_mb = MBX_9|MBX_0;
/* Widen the set of returned mailboxes per adapter capability. */
1641 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1642 if (IS_CNA_CAPABLE(vha->hw))
1643 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1644 if (IS_FWI2_CAPABLE(vha->hw))
1645 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1646 if (IS_QLA27XX(vha->hw))
1647 mcp->in_mb |= MBX_15;
1648 mcp->tov = MBX_TOV_SECONDS;
1650 rval = qla2x00_mailbox_command(vha, mcp);
/* Translate firmware completion codes into driver status codes. */
1651 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1652 rval = QLA_COMMAND_ERROR;
1653 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1654 rval = QLA_INVALID_COMMAND;
/* Unpack address components: AL_PA/area from mb2, domain from mb3. */
1658 *al_pa = LSB(mcp->mb[2]);
1659 *area = MSB(mcp->mb[2]);
1660 *domain = LSB(mcp->mb[3]);
1662 *sw_cap = mcp->mb[7];
1664 if (rval != QLA_SUCCESS) {
1666 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1668 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1669 "Done %s.\n", __func__);
/* CNA: FCoE VLAN id (12 bits of mb9), FCF index, and VN-port MAC
 * delivered byte-swapped across mb11-mb13. */
1671 if (IS_CNA_CAPABLE(vha->hw)) {
1672 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1673 vha->fcoe_fcf_idx = mcp->mb[10];
1674 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1675 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1676 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1677 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1678 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1679 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1681 /* If FA-WWN supported */
1682 if (IS_FAWWN_CAPABLE(vha->hw)) {
/* mb7 BIT_14 signals a fabric-assigned WWPN in mb16-mb19. */
1683 if (mcp->mb[7] & BIT_14) {
1684 vha->port_name[0] = MSB(mcp->mb[16]);
1685 vha->port_name[1] = LSB(mcp->mb[16]);
1686 vha->port_name[2] = MSB(mcp->mb[17]);
1687 vha->port_name[3] = LSB(mcp->mb[17]);
1688 vha->port_name[4] = MSB(mcp->mb[18]);
1689 vha->port_name[5] = LSB(mcp->mb[18]);
1690 vha->port_name[6] = MSB(mcp->mb[19]);
1691 vha->port_name[7] = LSB(mcp->mb[19]);
1692 fc_host_port_name(vha->host) =
1693 wwn_to_u64(vha->port_name);
1694 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1695 "FA-WWN acquired %016llx\n",
1696 wwn_to_u64(vha->port_name));
1700 if (IS_QLA27XX(vha->hw))
1701 vha->bbcr = mcp->mb[15];
1708 * qla2x00_get_retry_cnt
1709 * Get current firmware login retry count and delay.
1712 * ha = adapter block pointer.
1713 * retry_cnt = pointer to login retry count.
1714 * tov = pointer to login timeout value.
1717 * qla2x00 local function return status code.
/*
 * Query firmware login retry count and timeout via MBC_GET_RETRY_COUNT.
 * Outputs are only updated when the firmware values exceed the
 * caller-provided retry_cnt * tov product.  mb3 arrives in 100 ms units.
 */
1723 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1729 mbx_cmd_t *mcp = &mc;
1731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1732 "Entered %s.\n", __func__);
1734 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1735 mcp->out_mb = MBX_0;
1736 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1737 mcp->tov = MBX_TOV_SECONDS;
1739 rval = qla2x00_mailbox_command(vha, mcp);
1741 if (rval != QLA_SUCCESS) {
1743 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1744 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1746 /* Convert returned data and check our values. */
/* r_a_tov in firmware is 2x the FC R_A_TOV; halve, then scale the
 * 100 ms units down to seconds for the comparison below. */
1747 *r_a_tov = mcp->mb[3] / 2;
1748 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1749 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1750 /* Update to the larger values */
1751 *retry_cnt = (uint8_t)mcp->mb[1];
1755 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1756 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1763 * qla2x00_init_firmware
1764 * Initialize adapter firmware.
1767 * ha = adapter block pointer.
1768 * dptr = Initialization control block pointer.
1769 * size = size of initialization control block.
1770 * TARGET_QUEUE_LOCK must be released.
1771 * ADAPTER_STATE_LOCK must be released.
1774 * qla2x00 local function return status code.
/*
 * Push the initialization control block (ICB) to the firmware.  Uses
 * the MID (multi-ID) variant when NPIV is supported, and appends the
 * extended ICB registers when one is present.  On 83xx/27xx, mb3
 * reports SFP diagnostic info on failure.
 */
1780 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1784 mbx_cmd_t *mcp = &mc;
1785 struct qla_hw_data *ha = vha->hw;
1787 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1788 "Entered %s.\n", __func__);
/* P3P (82xx) with doorbell writes enabled: kick the NX doorbell. */
1790 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1791 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1792 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1794 if (ha->flags.npiv_supported)
1795 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1797 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
/* 64-bit DMA address of the ICB split across mb2/mb3/mb6/mb7. */
1800 mcp->mb[2] = MSW(ha->init_cb_dma);
1801 mcp->mb[3] = LSW(ha->init_cb_dma);
1802 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1803 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1804 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* Optional extended ICB: address in mb10-mb13, size in mb14. */
1805 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1807 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1808 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1809 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1810 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1811 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1812 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1814 /* 1 and 2 should normally be captured. */
1815 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1816 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1817 /* mb3 is additional info about the installed SFP. */
1818 mcp->in_mb |= MBX_3;
1819 mcp->buf_size = size;
1820 mcp->flags = MBX_DMA_OUT;
1821 mcp->tov = MBX_TOV_SECONDS;
1822 rval = qla2x00_mailbox_command(vha, mcp);
1824 if (rval != QLA_SUCCESS) {
1826 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1827 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1828 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
/* 27xx: mb2==6 / mb3==2 flag an invalid SFP or validation failure. */
1830 if (IS_QLA27XX(ha)) {
1831 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1832 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1833 "Invalid SFP/Validation Failed\n");
1835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1836 "Done %s.\n", __func__);
1844 * qla2x00_get_port_database
1845 * Issue normal/enhanced get port database mailbox command
1846 * and copy device name as necessary.
1849 * ha = adapter state pointer.
1850 * dev = structure pointer.
1851 * opt = enhanced cmd option byte.
1854 * qla2x00 local function return status code.
/*
 * Fetch the firmware port database entry for fcport into a DMA pool
 * buffer, verify the remote port is logged in and still the same
 * WWPN, then copy names, port id, port type and class-of-service
 * back into fcport.  Handles both FWI2 (24xx layout) and legacy
 * (2xxx layout) database formats.
 */
1860 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1864 mbx_cmd_t *mcp = &mc;
1865 port_database_t *pd;
1866 struct port_database_24xx *pd24;
1868 struct qla_hw_data *ha = vha->hw;
1870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1871 "Entered %s.\n", __func__);
/* Zeroed DMA buffer shared by both database layouts. */
1874 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1876 ql_log(ql_log_warn, vha, 0x1050,
1877 "Failed to allocate port database structure.\n");
1879 return QLA_MEMORY_ALLOC_FAILED;
1882 mcp->mb[0] = MBC_GET_PORT_DATABASE;
/* Legacy adapters use the enhanced opcode when options are given. */
1883 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1884 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1885 mcp->mb[2] = MSW(pd_dma);
1886 mcp->mb[3] = LSW(pd_dma);
1887 mcp->mb[6] = MSW(MSD(pd_dma));
1888 mcp->mb[7] = LSW(MSD(pd_dma));
1889 mcp->mb[9] = vha->vp_idx;
1890 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1892 if (IS_FWI2_CAPABLE(ha)) {
1893 mcp->mb[1] = fcport->loop_id;
1895 mcp->out_mb |= MBX_10|MBX_1;
1896 mcp->in_mb |= MBX_1;
1897 } else if (HAS_EXTENDED_IDS(ha)) {
1898 mcp->mb[1] = fcport->loop_id;
1900 mcp->out_mb |= MBX_10|MBX_1;
/* Legacy: loop id in high byte, option byte in low byte of mb1. */
1902 mcp->mb[1] = fcport->loop_id << 8 | opt;
1903 mcp->out_mb |= MBX_1;
1905 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1906 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1907 mcp->flags = MBX_DMA_IN;
/* Long timeout: 2.5x login_timeout to cover fabric round trips. */
1908 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1909 rval = qla2x00_mailbox_command(vha, mcp);
1910 if (rval != QLA_SUCCESS)
1913 if (IS_FWI2_CAPABLE(ha)) {
1915 u8 current_login_state, last_login_state;
1917 pd24 = (struct port_database_24xx *) pd;
1919 /* Check for logged in state. */
/* NVMe sessions keep their login state in the high nibble,
 * FCP in the low nibble of the same byte. */
1920 if (fcport->fc4f_nvme) {
1921 current_login_state = pd24->current_login_state >> 4;
1922 last_login_state = pd24->last_login_state >> 4;
1924 current_login_state = pd24->current_login_state & 0xf;
1925 last_login_state = pd24->last_login_state & 0xf;
1927 fcport->current_login_state = pd24->current_login_state;
1928 fcport->last_login_state = pd24->last_login_state;
1930 /* Check for logged in state. */
1931 if (current_login_state != PDS_PRLI_COMPLETE &&
1932 last_login_state != PDS_PRLI_COMPLETE) {
1933 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1934 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1935 current_login_state, last_login_state,
1937 rval = QLA_FUNCTION_FAILED;
/* Catch a mid-scan device swap: same loop id, different WWPN. */
1943 if (fcport->loop_id == FC_NO_LOOP_ID ||
1944 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1945 memcmp(fcport->port_name, pd24->port_name, 8))) {
1946 /* We lost the device mid way. */
1947 rval = QLA_NOT_LOGGED_IN;
1951 /* Names are little-endian. */
1952 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1953 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1955 /* Get port_id of device. */
1956 fcport->d_id.b.domain = pd24->port_id[0];
1957 fcport->d_id.b.area = pd24->port_id[1];
1958 fcport->d_id.b.al_pa = pd24->port_id[2];
1959 fcport->d_id.b.rsvd_1 = 0;
1961 /* If not target must be initiator or unknown type. */
1962 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1963 fcport->port_type = FCT_INITIATOR;
1965 fcport->port_type = FCT_TARGET;
1967 /* Passback COS information. */
1968 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1969 FC_COS_CLASS2 : FC_COS_CLASS3;
1971 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1972 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
/* Legacy (pre-FWI2) port database layout from here down. */
1976 /* Check for logged in state. */
1977 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1978 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1979 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1980 "Unable to verify login-state (%x/%x) - "
1981 "portid=%02x%02x%02x.\n", pd->master_state,
1982 pd->slave_state, fcport->d_id.b.domain,
1983 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1984 rval = QLA_FUNCTION_FAILED;
1988 if (fcport->loop_id == FC_NO_LOOP_ID ||
1989 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1990 memcmp(fcport->port_name, pd->port_name, 8))) {
1991 /* We lost the device mid way. */
1992 rval = QLA_NOT_LOGGED_IN;
1996 /* Names are little-endian. */
1997 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1998 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2000 /* Get port_id of device. */
/* NOTE(review): legacy layout reads area from port_id[3] while the
 * 24xx path above uses index 1 — byte order differs between the two
 * database formats; matches upstream, do not "fix". */
2001 fcport->d_id.b.domain = pd->port_id[0];
2002 fcport->d_id.b.area = pd->port_id[3];
2003 fcport->d_id.b.al_pa = pd->port_id[2];
2004 fcport->d_id.b.rsvd_1 = 0;
2006 /* If not target must be initiator or unknown type. */
2007 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2008 fcport->port_type = FCT_INITIATOR;
2010 fcport->port_type = FCT_TARGET;
2012 /* Passback COS information. */
2013 fcport->supported_classes = (pd->options & BIT_4) ?
2014 FC_COS_CLASS2: FC_COS_CLASS3;
2018 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2021 if (rval != QLA_SUCCESS) {
2022 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2023 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2024 mcp->mb[0], mcp->mb[1]);
2026 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2027 "Done %s.\n", __func__);
2034 * qla2x00_get_firmware_state
2035 * Get adapter firmware state.
2038 * ha = adapter block pointer.
2039 * dptr = pointer for firmware state.
2040 * TARGET_QUEUE_LOCK must be released.
2041 * ADAPTER_STATE_LOCK must be released.
2044 * qla2x00 local function return status code.
/*
 * Read the firmware state registers into states[].  FWI2 adapters
 * return six state words (including SFP info in mb3 and D-port status
 * in mb6); legacy adapters return only mb1.
 */
2050 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2054 mbx_cmd_t *mcp = &mc;
2055 struct qla_hw_data *ha = vha->hw;
2057 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2058 "Entered %s.\n", __func__);
2060 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2061 mcp->out_mb = MBX_0;
2062 if (IS_FWI2_CAPABLE(vha->hw))
2063 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2065 mcp->in_mb = MBX_1|MBX_0;
2066 mcp->tov = MBX_TOV_SECONDS;
2068 rval = qla2x00_mailbox_command(vha, mcp);
2070 /* Return firmware states. */
2071 states[0] = mcp->mb[1];
2072 if (IS_FWI2_CAPABLE(vha->hw)) {
2073 states[1] = mcp->mb[2];
2074 states[2] = mcp->mb[3]; /* SFP info */
2075 states[3] = mcp->mb[4];
2076 states[4] = mcp->mb[5];
2077 states[5] = mcp->mb[6]; /* DPORT status */
2080 if (rval != QLA_SUCCESS) {
2082 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
/* 27xx: mb2==6 / mb3==2 flag an invalid SFP or validation failure. */
2084 if (IS_QLA27XX(ha)) {
2085 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2086 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2087 "Invalid SFP/Validation Failed\n");
2089 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2090 "Done %s.\n", __func__);
2097 * qla2x00_get_port_name
2098 * Issue get port name mailbox command.
2099 * Returned name is in big endian format.
2102 * ha = adapter block pointer.
2103 * loop_id = loop ID of device.
2104 * name = pointer for name.
2105 * TARGET_QUEUE_LOCK must be released.
2106 * ADAPTER_STATE_LOCK must be released.
2109 * qla2x00 local function return status code.
/*
 * Retrieve the WWPN for a loop id via MBC_GET_PORT_NAME.  The eight
 * name bytes are unpacked big-endian from mb2/mb3/mb6/mb7 into name[].
 */
2115 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2120 mbx_cmd_t *mcp = &mc;
2122 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2123 "Entered %s.\n", __func__);
2125 mcp->mb[0] = MBC_GET_PORT_NAME;
2126 mcp->mb[9] = vha->vp_idx;
2127 mcp->out_mb = MBX_9|MBX_1|MBX_0;
/* Loop-id packing differs with extended-ID support (see abort paths). */
2128 if (HAS_EXTENDED_IDS(vha->hw)) {
2129 mcp->mb[1] = loop_id;
2131 mcp->out_mb |= MBX_10;
2133 mcp->mb[1] = loop_id << 8 | opt;
2136 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2137 mcp->tov = MBX_TOV_SECONDS;
2139 rval = qla2x00_mailbox_command(vha, mcp);
2141 if (rval != QLA_SUCCESS) {
2143 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2146 /* This function returns name in big endian. */
2147 name[0] = MSB(mcp->mb[2]);
2148 name[1] = LSB(mcp->mb[2]);
2149 name[2] = MSB(mcp->mb[3]);
2150 name[3] = LSB(mcp->mb[3]);
2151 name[4] = MSB(mcp->mb[6]);
2152 name[5] = LSB(mcp->mb[6]);
2153 name[6] = MSB(mcp->mb[7]);
2154 name[7] = LSB(mcp->mb[7]);
2157 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2158 "Done %s.\n", __func__);
2165 * qla24xx_link_initialization
2166 * Issue link initialization mailbox command.
2169 * ha = adapter block pointer.
2170 * TARGET_QUEUE_LOCK must be released.
2171 * ADAPTER_STATE_LOCK must be released.
2174 * qla2x00 local function return status code.
/*
 * Issue MBC_LINK_INITIALIZATION on FWI2 (24xx+) adapters.  Not
 * supported on legacy or CNA parts.  mb1 selects loop (BIT_6) vs
 * point-to-point (BIT_5) behavior based on current operating mode.
 */
2180 qla24xx_link_initialize(scsi_qla_host_t *vha)
2184 mbx_cmd_t *mcp = &mc;
2186 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2187 "Entered %s.\n", __func__);
/* Command only exists on FWI2 non-CNA hardware. */
2189 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2190 return QLA_FUNCTION_FAILED;
2192 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2194 if (vha->hw->operating_mode == LOOP)
2195 mcp->mb[1] |= BIT_6;
2197 mcp->mb[1] |= BIT_5;
2200 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2202 mcp->tov = MBX_TOV_SECONDS;
2204 rval = qla2x00_mailbox_command(vha, mcp);
2206 if (rval != QLA_SUCCESS) {
2207 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2210 "Done %s.\n", __func__);
2218 * Issue LIP reset mailbox command.
2221 * ha = adapter block pointer.
2222 * TARGET_QUEUE_LOCK must be released.
2223 * ADAPTER_STATE_LOCK must be released.
2226 * qla2x00 local function return status code.
/*
 * Reset the FC link.  Three hardware generations, three commands:
 * CNA parts log out across all FCFs via LIP_FULL_LOGIN, FWI2 parts
 * use LIP_FULL_LOGIN with either re-init (N2N) or LIP semantics, and
 * legacy ISPs use the plain MBC_LIP_RESET.
 */
2232 qla2x00_lip_reset(scsi_qla_host_t *vha)
2236 mbx_cmd_t *mcp = &mc;
2238 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2239 "Entered %s.\n", __func__);
2241 if (IS_CNA_CAPABLE(vha->hw)) {
2242 /* Logout across all FCFs. */
2243 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2246 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2247 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2248 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
/* N2N topology needs a re-init rather than a LIP. */
2249 if (N2N_TOPO(vha->hw))
2250 mcp->mb[1] = BIT_4; /* re-init */
2252 mcp->mb[1] = BIT_6; /* LIP */
2254 mcp->mb[3] = vha->hw->loop_reset_delay;
2255 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2257 mcp->mb[0] = MBC_LIP_RESET;
2258 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Legacy: 0x00ff/0xff00 is the "all AL_PAs" selector, whose byte
 * position depends on extended-ID support. */
2259 if (HAS_EXTENDED_IDS(vha->hw)) {
2260 mcp->mb[1] = 0x00ff;
2262 mcp->out_mb |= MBX_10;
2264 mcp->mb[1] = 0xff00;
2266 mcp->mb[2] = vha->hw->loop_reset_delay;
2270 mcp->tov = MBX_TOV_SECONDS;
2272 rval = qla2x00_mailbox_command(vha, mcp);
2274 if (rval != QLA_SUCCESS) {
2276 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2279 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2280 "Done %s.\n", __func__);
2291 * ha = adapter block pointer.
2292 * sns = pointer for command.
2293 * cmd_size = command size.
2294 * buf_size = response/command size.
2295 * TARGET_QUEUE_LOCK must be released.
2296 * ADAPTER_STATE_LOCK must be released.
2299 * qla2x00 local function return status code.
/*
 * Send a Simple Name Server (SNS) command on legacy adapters via
 * MBC_SEND_SNS_COMMAND.  The request and response share one DMA
 * buffer (MBX_DMA_OUT|MBX_DMA_IN); the timeout is scaled to 2.5x
 * login_timeout to cover fabric round trips.
 */
2305 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2306 uint16_t cmd_size, size_t buf_size)
2310 mbx_cmd_t *mcp = &mc;
2312 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2313 "Entered %s.\n", __func__);
2315 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2316 "Retry cnt=%d ratov=%d total tov=%d.\n",
2317 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2319 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2320 mcp->mb[1] = cmd_size;
/* 64-bit DMA address of the SNS buffer in mb2/mb3/mb6/mb7. */
2321 mcp->mb[2] = MSW(sns_phys_address);
2322 mcp->mb[3] = LSW(sns_phys_address);
2323 mcp->mb[6] = MSW(MSD(sns_phys_address));
2324 mcp->mb[7] = LSW(MSD(sns_phys_address));
2325 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2326 mcp->in_mb = MBX_0|MBX_1;
2327 mcp->buf_size = buf_size;
2328 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2329 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2330 rval = qla2x00_mailbox_command(vha, mcp);
2332 if (rval != QLA_SUCCESS) {
2334 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2335 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2336 rval, mcp->mb[0], mcp->mb[1]);
2339 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2340 "Done %s.\n", __func__);
/*
 * Fabric login on FWI2 (24xx+) adapters via a PLOGI login IOCB rather
 * than a mailbox command.  Translates the IOCB completion into the
 * legacy mailbox-status codes in mb[] so callers written against the
 * 2xxx MBC_LOGIN_FABRIC_PORT interface keep working.
 */
2347 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2348 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2352 struct logio_entry_24xx *lg;
2355 struct qla_hw_data *ha = vha->hw;
2356 struct req_que *req;
2358 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2359 "Entered %s.\n", __func__);
/* vports with a qpair submit on their own request queue. */
2361 if (vha->vp_idx && vha->qpair)
2362 req = vha->qpair->req;
2364 req = ha->req_q_map[0];
2366 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2368 ql_log(ql_log_warn, vha, 0x1062,
2369 "Failed to allocate login IOCB.\n");
2370 return QLA_MEMORY_ALLOC_FAILED;
/* Build the PLOGI login IOCB; opt bits map to conditional-PLOGI /
 * skip-PRLI control flags in lines elided by extraction. */
2373 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2374 lg->entry_count = 1;
2375 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2376 lg->nport_handle = cpu_to_le16(loop_id);
2377 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2379 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2381 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2382 lg->port_id[0] = al_pa;
2383 lg->port_id[1] = area;
2384 lg->port_id[2] = domain;
2385 lg->vp_index = vha->vp_idx;
/* Timeout: 2x R_A_TOV (r_a_tov stored in 100ms units) + 2s slack. */
2386 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2387 (ha->r_a_tov / 10 * 2) + 2);
2388 if (rval != QLA_SUCCESS) {
2389 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2390 "Failed to issue login IOCB (%x).\n", rval);
2391 } else if (lg->entry_status != 0) {
2392 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2393 "Failed to complete IOCB -- error status (%x).\n",
2395 rval = QLA_FUNCTION_FAILED;
2396 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2397 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2398 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2400 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2401 "Failed to complete IOCB -- completion status (%x) "
2402 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
/* Map login-IOCB sub-codes onto legacy mailbox status values. */
2406 case LSC_SCODE_PORTID_USED:
2407 mb[0] = MBS_PORT_ID_USED;
2408 mb[1] = LSW(iop[1]);
2410 case LSC_SCODE_NPORT_USED:
2411 mb[0] = MBS_LOOP_ID_USED;
2413 case LSC_SCODE_NOLINK:
2414 case LSC_SCODE_NOIOCB:
2415 case LSC_SCODE_NOXCB:
2416 case LSC_SCODE_CMD_FAILED:
2417 case LSC_SCODE_NOFABRIC:
2418 case LSC_SCODE_FW_NOT_READY:
2419 case LSC_SCODE_NOT_LOGGED_IN:
2420 case LSC_SCODE_NOPCB:
2421 case LSC_SCODE_ELS_REJECT:
2422 case LSC_SCODE_CMD_PARAM_ERR:
2423 case LSC_SCODE_NONPORT:
2424 case LSC_SCODE_LOGGED_IN:
2425 case LSC_SCODE_NOFLOGI_ACC:
2427 mb[0] = MBS_COMMAND_ERROR;
2431 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2432 "Done %s.\n", __func__);
/* Success: report COS and confirmed-completion bits in mb[10]. */
2434 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2436 mb[0] = MBS_COMMAND_COMPLETE;
2438 if (iop[0] & BIT_4) {
2444 /* Passback COS information. */
2446 if (lg->io_parameter[7] || lg->io_parameter[8])
2447 mb[10] |= BIT_0; /* Class 2. */
2448 if (lg->io_parameter[9] || lg->io_parameter[10])
2449 mb[10] |= BIT_1; /* Class 3. */
2450 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2451 mb[10] |= BIT_7; /* Confirmed Completion
2456 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2462 * qla2x00_login_fabric
2463 * Issue login fabric port mailbox command.
2466 * ha = adapter block pointer.
2467 * loop_id = device loop ID.
2468 * domain = device domain.
2469 * area = device area.
2470 * al_pa = device AL_PA.
2471 * status = pointer for return status.
2472 * opt = command options.
2473 * TARGET_QUEUE_LOCK must be released.
2474 * ADAPTER_STATE_LOCK must be released.
2477 * qla2x00 local function return status code.
/*
 * Fabric login on legacy adapters via MBC_LOGIN_FABRIC_PORT.  The
 * D_ID is packed as domain in mb2 and area/AL_PA in mb3; mailbox
 * results are passed back through mb[] for the caller to interpret.
 */
2483 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2484 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2488 mbx_cmd_t *mcp = &mc;
2489 struct qla_hw_data *ha = vha->hw;
2491 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2492 "Entered %s.\n", __func__);
2494 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2495 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2496 if (HAS_EXTENDED_IDS(ha)) {
2497 mcp->mb[1] = loop_id;
2499 mcp->out_mb |= MBX_10;
2501 mcp->mb[1] = (loop_id << 8) | opt;
2503 mcp->mb[2] = domain;
2504 mcp->mb[3] = area << 8 | al_pa;
2506 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
/* Long timeout: 2.5x login_timeout for fabric login round trips. */
2507 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2509 rval = qla2x00_mailbox_command(vha, mcp);
2511 /* Return mailbox statuses. */
2518 /* COS retrieved from Get-Port-Database mailbox command. */
2522 if (rval != QLA_SUCCESS) {
2523 /* RLU tmp code: need to change main mailbox_command function to
2524 * return ok even when the mailbox completion value is not
2525 * SUCCESS. The caller needs to be responsible to interpret
2526 * the return values of this mailbox command if we're not
2527 * to change too much of the existing code.
/* 0x4001-0x4006 are firmware completion codes the caller is expected
 * to interpret from mb[]; treat them as non-fatal here. */
2529 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2530 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2531 mcp->mb[0] == 0x4006)
2535 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2536 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2537 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2540 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2541 "Done %s.\n", __func__);
2548 * qla2x00_login_local_device
2549 * Issue login loop port mailbox command.
2552 * ha = adapter block pointer.
2553 * loop_id = device loop ID.
2554 * opt = command options.
2557 * Return status code.
/*
 * Log in to a local-loop device.  FWI2 adapters delegate to the
 * fabric-login IOCB path; legacy adapters use MBC_LOGIN_LOOP_PORT.
 * Mailbox results are optionally returned through mb_ret[].
 */
2564 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2565 uint16_t *mb_ret, uint8_t opt)
2569 mbx_cmd_t *mcp = &mc;
2570 struct qla_hw_data *ha = vha->hw;
2572 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2573 "Entered %s.\n", __func__);
/* FWI2: loop logins go through the 24xx login IOCB path. */
2575 if (IS_FWI2_CAPABLE(ha))
2576 return qla24xx_login_fabric(vha, fcport->loop_id,
2577 fcport->d_id.b.domain, fcport->d_id.b.area,
2578 fcport->d_id.b.al_pa, mb_ret, opt);
2580 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2581 if (HAS_EXTENDED_IDS(ha))
2582 mcp->mb[1] = fcport->loop_id;
2584 mcp->mb[1] = fcport->loop_id << 8;
2586 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2587 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2588 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2590 rval = qla2x00_mailbox_command(vha, mcp);
2592 /* Return mailbox statuses. */
2593 if (mb_ret != NULL) {
2594 mb_ret[0] = mcp->mb[0];
2595 mb_ret[1] = mcp->mb[1];
2596 mb_ret[6] = mcp->mb[6];
2597 mb_ret[7] = mcp->mb[7];
2600 if (rval != QLA_SUCCESS) {
2601 /* AV tmp code: need to change main mailbox_command function to
2602 * return ok even when the mailbox completion value is not
2603 * SUCCESS. The caller needs to be responsible to interpret
2604 * the return values of this mailbox command if we're not
2605 * to change too much of the existing code.
/* 0x4005/0x4006 completions are left to the caller to interpret. */
2607 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2610 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2611 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2612 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2616 "Done %s.\n", __func__);
/*
 * Fabric logout on FWI2 (24xx+) adapters via an implicit/explicit
 * LOGO login IOCB, mirroring qla24xx_login_fabric().  Frees the DMA
 * IOCB buffer before returning.
 */
2623 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2624 uint8_t area, uint8_t al_pa)
2627 struct logio_entry_24xx *lg;
2629 struct qla_hw_data *ha = vha->hw;
2630 struct req_que *req;
2632 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2633 "Entered %s.\n", __func__);
2635 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2637 ql_log(ql_log_warn, vha, 0x106e,
2638 "Failed to allocate logout IOCB.\n");
2639 return QLA_MEMORY_ALLOC_FAILED;
/* Build the LOGO IOCB for the given N_Port handle and D_ID. */
2643 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2644 lg->entry_count = 1;
2645 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2646 lg->nport_handle = cpu_to_le16(loop_id);
2648 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2650 lg->port_id[0] = al_pa;
2651 lg->port_id[1] = area;
2652 lg->port_id[2] = domain;
2653 lg->vp_index = vha->vp_idx;
/* Timeout: 2x R_A_TOV (r_a_tov stored in 100ms units) + 2s slack. */
2654 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2655 (ha->r_a_tov / 10 * 2) + 2);
2656 if (rval != QLA_SUCCESS) {
2657 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2658 "Failed to issue logout IOCB (%x).\n", rval);
2659 } else if (lg->entry_status != 0) {
2660 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2661 "Failed to complete IOCB -- error status (%x).\n",
2663 rval = QLA_FUNCTION_FAILED;
2664 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2665 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2666 "Failed to complete IOCB -- completion status (%x) "
2667 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2668 le32_to_cpu(lg->io_parameter[0]),
2669 le32_to_cpu(lg->io_parameter[1]));
2672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2673 "Done %s.\n", __func__);
2676 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2682 * qla2x00_fabric_logout
2683 * Issue logout fabric port mailbox command.
2686 * ha = adapter block pointer.
2687 * loop_id = device loop ID.
2688 * TARGET_QUEUE_LOCK must be released.
2689 * ADAPTER_STATE_LOCK must be released.
2692 * qla2x00 local function return status code.
/*
 * qla2x00_fabric_logout
 *	Issue the MBC_LOGOUT_FABRIC_PORT mailbox command (pre-24xx path).
 *	With extended IDs the loop ID goes in mb[1] as-is; otherwise it is
 *	placed in the high byte of mb[1].
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
2698 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2699     uint8_t area, uint8_t al_pa)
2703 	mbx_cmd_t *mcp = &mc;
2705 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2706 	    "Entered %s.\n", __func__);
2708 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2709 	mcp->out_mb = MBX_1|MBX_0;
2710 	if (HAS_EXTENDED_IDS(vha->hw)) {
2711 		mcp->mb[1] = loop_id;
2713 		mcp->out_mb |= MBX_10;
	/* Legacy (non-extended-ID) chips expect the loop ID in bits 15:8. */
2715 		mcp->mb[1] = loop_id << 8;
2718 	mcp->in_mb = MBX_1|MBX_0;
2719 	mcp->tov = MBX_TOV_SECONDS;
2721 	rval = qla2x00_mailbox_command(vha, mcp);
2723 	if (rval != QLA_SUCCESS) {
2725 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2726 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2729 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2730 		    "Done %s.\n", __func__);
2737 * qla2x00_full_login_lip
2738 * Issue full login LIP mailbox command.
2741 * ha = adapter block pointer.
2742 * TARGET_QUEUE_LOCK must be released.
2743 * ADAPTER_STATE_LOCK must be released.
2746 * qla2x00 local function return status code.
/*
 * qla2x00_full_login_lip
 *	Issue the MBC_LIP_FULL_LOGIN mailbox command to force a LIP with
 *	full login.  FWI2-capable chips additionally set BIT_3 in mb[1].
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
2752 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2756 	mbx_cmd_t *mcp = &mc;
2758 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2759 	    "Entered %s.\n", __func__);
2761 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2762 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2765 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2767 	mcp->tov = MBX_TOV_SECONDS;
2769 	rval = qla2x00_mailbox_command(vha, mcp);
2771 	if (rval != QLA_SUCCESS) {
2773 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2776 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2777 		    "Done %s.\n", __func__);
2784 * qla2x00_get_id_list
2787 * ha = adapter block pointer.
2790 * qla2x00 local function return status code.
/*
 * qla2x00_get_id_list
 *	Issue MBC_GET_ID_LIST to retrieve the firmware's loop/port ID list
 *	into the caller-supplied DMA buffer.  On success, *entries receives
 *	the entry count reported in mb[1].
 *
 * Input:
 *	id_list/id_list_dma = destination buffer and its bus address
 *	                      (id_list must be non-NULL).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
2796 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2801 	mbx_cmd_t *mcp = &mc;
2803 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2804 	    "Entered %s.\n", __func__);
2806 	if (id_list == NULL)
2807 		return QLA_FUNCTION_FAILED;
2809 	mcp->mb[0] = MBC_GET_ID_LIST;
2810 	mcp->out_mb = MBX_0;
	/* FWI2 chips take a 64-bit address in mb[2,3,6,7] plus vp index. */
2811 	if (IS_FWI2_CAPABLE(vha->hw)) {
2812 		mcp->mb[2] = MSW(id_list_dma);
2813 		mcp->mb[3] = LSW(id_list_dma);
2814 		mcp->mb[6] = MSW(MSD(id_list_dma));
2815 		mcp->mb[7] = LSW(MSD(id_list_dma));
2817 		mcp->mb[9] = vha->vp_idx;
2818 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
	/* Legacy chips use the mb[1,2,3,6] register layout instead. */
2820 		mcp->mb[1] = MSW(id_list_dma);
2821 		mcp->mb[2] = LSW(id_list_dma);
2822 		mcp->mb[3] = MSW(MSD(id_list_dma));
2823 		mcp->mb[6] = LSW(MSD(id_list_dma));
2824 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2826 	mcp->in_mb = MBX_1|MBX_0;
2827 	mcp->tov = MBX_TOV_SECONDS;
2829 	rval = qla2x00_mailbox_command(vha, mcp);
2831 	if (rval != QLA_SUCCESS) {
2833 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2835 		*entries = mcp->mb[1];
2836 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2837 		    "Done %s.\n", __func__);
2844 * qla2x00_get_resource_cnts
2845 * Get current firmware resource counts.
2848 * ha = adapter block pointer.
2851 * qla2x00 local function return status code.
/*
 * qla2x00_get_resource_cnts
 *	Issue MBC_GET_RESOURCE_COUNTS and cache the firmware's exchange /
 *	IOCB resource counters in the qla_hw_data structure.  On 81xx/83xx/
 *	27xx parts mb[12] additionally reports the FCF count.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
2857 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2859 	struct qla_hw_data *ha = vha->hw;
2862 	mbx_cmd_t *mcp = &mc;
2864 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2865 	    "Entered %s.\n", __func__);
2867 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2868 	mcp->out_mb = MBX_0;
2869 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2870 	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2871 		mcp->in_mb |= MBX_12;
2872 	mcp->tov = MBX_TOV_SECONDS;
2874 	rval = qla2x00_mailbox_command(vha, mcp);
2876 	if (rval != QLA_SUCCESS) {
2878 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
2879 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
2881 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2882 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2883 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2884 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2885 		    mcp->mb[11], mcp->mb[12]);
	/* Cache returned counters for later resource accounting. */
2887 		ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2888 		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2889 		ha->cur_fw_xcb_count = mcp->mb[3];
2890 		ha->orig_fw_xcb_count = mcp->mb[6];
2891 		ha->cur_fw_iocb_count = mcp->mb[7];
2892 		ha->orig_fw_iocb_count = mcp->mb[10];
2893 		if (ha->flags.npiv_supported)
2894 			ha->max_npiv_vports = mcp->mb[11];
2895 		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2896 			ha->fw_max_fcf_count = mcp->mb[12];
2903 * qla2x00_get_fcal_position_map
2904 * Get FCAL (LILP) position map using mailbox command
2907 * ha = adapter state pointer.
2908 * pos_map = buffer pointer (can be NULL).
2911 * qla2x00 local function return status code.
/*
 * qla2x00_get_fcal_position_map
 *	Issue MBC_GET_FC_AL_POSITION_MAP to read the FC-AL (LILP) position
 *	map into a pool-allocated DMA buffer; optionally copy it out to
 *	pos_map (may be NULL, in which case only the debug dump happens).
 *
 * Returns:
 *	QLA_SUCCESS / QLA_MEMORY_ALLOC_FAILED / mailbox status.
 */
2917 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2921 	mbx_cmd_t *mcp = &mc;
2923 	dma_addr_t pmap_dma;
2924 	struct qla_hw_data *ha = vha->hw;
2926 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2927 	    "Entered %s.\n", __func__);
2929 	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2931 		ql_log(ql_log_warn, vha, 0x1080,
2932 		    "Memory alloc failed.\n");
2933 		return QLA_MEMORY_ALLOC_FAILED;
2936 	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2937 	mcp->mb[2] = MSW(pmap_dma);
2938 	mcp->mb[3] = LSW(pmap_dma);
2939 	mcp->mb[6] = MSW(MSD(pmap_dma));
2940 	mcp->mb[7] = LSW(MSD(pmap_dma));
2941 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2942 	mcp->in_mb = MBX_1|MBX_0;
2943 	mcp->buf_size = FCAL_MAP_SIZE;
2944 	mcp->flags = MBX_DMA_IN;
	/* Longer timeout: derived from login timeout rather than MBX_TOV. */
2945 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2946 	rval = qla2x00_mailbox_command(vha, mcp);
2948 	if (rval == QLA_SUCCESS) {
2949 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2950 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2951 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2952 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2956 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2958 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2960 	if (rval != QLA_SUCCESS) {
2961 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2963 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2964 		    "Done %s.\n", __func__);
2971 * qla2x00_get_link_status
2974 * ha = adapter block pointer.
2975 * loop_id = device loop ID.
2976 * ret_buf = pointer to link status return buffer.
2980 * BIT_0 = mem alloc error.
2981 * BIT_1 = mailbox error.
/*
 * qla2x00_get_link_status
 *	Issue MBC_GET_LINK_STATUS to DMA link statistics for the given
 *	loop ID into stats/stats_dma, then byte-swap the le32 counters
 *	in place (up to the link_up_cnt field) for host consumption.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
2984 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2985     struct link_statistics *stats, dma_addr_t stats_dma)
2989 	mbx_cmd_t *mcp = &mc;
2990 	uint32_t *iter = (void *)stats;
	/* Only the counters preceding link_up_cnt are swapped below. */
2991 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2992 	struct qla_hw_data *ha = vha->hw;
2994 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2995 	    "Entered %s.\n", __func__);
2997 	mcp->mb[0] = MBC_GET_LINK_STATUS;
2998 	mcp->mb[2] = MSW(LSD(stats_dma));
2999 	mcp->mb[3] = LSW(LSD(stats_dma));
3000 	mcp->mb[6] = MSW(MSD(stats_dma));
3001 	mcp->mb[7] = LSW(MSD(stats_dma));
3002 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	/* Loop-ID placement differs per chip family, as with other cmds. */
3004 	if (IS_FWI2_CAPABLE(ha)) {
3005 		mcp->mb[1] = loop_id;
3008 		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3009 		mcp->in_mb |= MBX_1;
3010 	} else if (HAS_EXTENDED_IDS(ha)) {
3011 		mcp->mb[1] = loop_id;
3013 		mcp->out_mb |= MBX_10|MBX_1;
3015 		mcp->mb[1] = loop_id << 8;
3016 		mcp->out_mb |= MBX_1;
3018 	mcp->tov = MBX_TOV_SECONDS;
3019 	mcp->flags = IOCTL_CMD;
3020 	rval = qla2x00_mailbox_command(vha, mcp);
3022 	if (rval == QLA_SUCCESS) {
3023 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3024 			ql_dbg(ql_dbg_mbx, vha, 0x1085,
3025 			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3026 			rval = QLA_FUNCTION_FAILED;
3028 			/* Re-endianize - firmware data is le32. */
3029 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3030 			    "Done %s.\n", __func__);
3031 			for ( ; dwords--; iter++)
3036 		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * qla24xx_get_isp_stats
 *	Issue MBC_GET_LINK_PRIV_STATS via qla24xx_send_mb_cmd() to DMA
 *	private link statistics into stats/stats_dma, then byte-swap the
 *	whole structure (le32 -> cpu) in place.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
3043 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3044     dma_addr_t stats_dma, uint16_t options)
3048 	mbx_cmd_t *mcp = &mc;
3049 	uint32_t *iter, dwords;
3051 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3052 	    "Entered %s.\n", __func__);
3054 	memset(&mc, 0, sizeof(mc));
3055 	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3056 	mc.mb[2] = MSW(stats_dma);
3057 	mc.mb[3] = LSW(stats_dma);
3058 	mc.mb[6] = MSW(MSD(stats_dma));
3059 	mc.mb[7] = LSW(MSD(stats_dma));
3060 	mc.mb[8] = sizeof(struct link_statistics) / 4;
	/*
	 * NOTE(review): mb[] holds host-order uint16_t values elsewhere in
	 * this file; the cpu_to_le16() on the next two stores looks
	 * inconsistent -- confirm against the mailbox interface spec.
	 */
3061 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
3062 	mc.mb[10] = cpu_to_le16(options);
3064 	rval = qla24xx_send_mb_cmd(vha, &mc);
3066 	if (rval == QLA_SUCCESS) {
3067 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3068 			ql_dbg(ql_dbg_mbx, vha, 0x1089,
3069 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
3070 			rval = QLA_FUNCTION_FAILED;
3072 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3073 			    "Done %s.\n", __func__);
3074 			/* Re-endianize - firmware data is le32. */
3075 			dwords = sizeof(struct link_statistics) / 4;
3076 			iter = &stats->link_fail_cnt;
3077 			for ( ; dwords--; iter++)
3082 		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * qla24xx_abort_command
 *	Abort an outstanding command (sp) by issuing an ABORT IOCB on
 *	ISP24xx-family adapters.  Falls through to the async abort path
 *	when ql2xasynctmfenable is set.  The outstanding-command table is
 *	scanned under the qpair lock to find the handle to abort.
 *
 * Returns:
 *	QLA_SUCCESS / QLA_FUNCTION_FAILED / QLA_MEMORY_ALLOC_FAILED /
 *	QLA_FUNCTION_PARAMETER_ERROR.
 */
3089 qla24xx_abort_command(srb_t *sp)
3092 	unsigned long flags = 0;
3094 	struct abort_entry_24xx *abt;
3097 	fc_port_t *fcport = sp->fcport;
3098 	struct scsi_qla_host *vha = fcport->vha;
3099 	struct qla_hw_data *ha = vha->hw;
3100 	struct req_que *req = vha->req;
3101 	struct qla_qpair *qpair = sp->qpair;
3103 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3104 	    "Entered %s.\n", __func__);
	/* Prefer the command's own qpair request queue when qpairs exist. */
3106 	if (vha->flags.qpairs_available && sp->qpair)
3107 		req = sp->qpair->req;
3109 		return QLA_FUNCTION_FAILED;
3111 	if (ql2xasynctmfenable)
3112 		return qla24xx_async_abort_command(sp);
	/* Locate sp's handle in the outstanding-commands array. */
3114 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3115 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3116 		if (req->outstanding_cmds[handle] == sp)
3119 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3120 	if (handle == req->num_outstanding_cmds) {
3121 		/* Command not found. */
3122 		return QLA_FUNCTION_FAILED;
3125 	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3127 		ql_log(ql_log_warn, vha, 0x108d,
3128 		    "Failed to allocate abort IOCB.\n");
3129 		return QLA_MEMORY_ALLOC_FAILED;
3132 	abt->entry_type = ABORT_IOCB_TYPE;
3133 	abt->entry_count = 1;
3134 	abt->handle = MAKE_HANDLE(req->id, abt->handle);
3135 	abt->nport_handle = cpu_to_le16(fcport->loop_id);
3136 	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3137 	abt->port_id[0] = fcport->d_id.b.al_pa;
3138 	abt->port_id[1] = fcport->d_id.b.area;
3139 	abt->port_id[2] = fcport->d_id.b.domain;
3140 	abt->vp_index = fcport->vha->vp_idx;
3142 	abt->req_que_no = cpu_to_le16(req->id);
3144 	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3145 	if (rval != QLA_SUCCESS) {
3146 		ql_dbg(ql_dbg_mbx, vha, 0x108e,
3147 		    "Failed to issue IOCB (%x).\n", rval);
3148 	} else if (abt->entry_status != 0) {
3149 		ql_dbg(ql_dbg_mbx, vha, 0x108f,
3150 		    "Failed to complete IOCB -- error status (%x).\n",
3152 		rval = QLA_FUNCTION_FAILED;
	/* Firmware reuses nport_handle as the completion status field. */
3153 	} else if (abt->nport_handle != cpu_to_le16(0)) {
3154 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
3155 		    "Failed to complete IOCB -- completion status (%x).\n",
3156 		    le16_to_cpu(abt->nport_handle));
		/*
		 * NOTE(review): CS_IOCB_ERROR is compared against the __le16
		 * field without cpu_to_le16() -- on big-endian hosts this
		 * test may never match; confirm intended semantics.
		 */
3157 		if (abt->nport_handle == CS_IOCB_ERROR)
3158 			rval = QLA_FUNCTION_PARAMETER_ERROR;
3160 			rval = QLA_FUNCTION_FAILED;
3162 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3163 		    "Done %s.\n", __func__);
3166 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * Overlay of a task-management request IOCB and its status response;
 * the same DMA buffer is submitted as a tsk_mgmt_entry and read back
 * as a sts_entry_24xx on completion.
 */
3171 struct tsk_mgmt_cmd {
3173 		struct tsk_mgmt_entry tsk;
3174 		struct sts_entry_24xx sts;
/*
 * __qla24xx_issue_tmf
 *	Build and issue a task-management IOCB (target reset or LUN reset,
 *	selected by 'type') for fcport, then validate the status IOCB and
 *	issue a marker.  'name' is used only for log messages; 'l' is the
 *	LUN (used when type == TCF_LUN_RESET).
 *
 * Returns:
 *	QLA_SUCCESS / QLA_MEMORY_ALLOC_FAILED / QLA_FUNCTION_FAILED /
 *	IOCB issue status.
 */
3179 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3180     uint64_t l, int tag)
3183 	struct tsk_mgmt_cmd *tsk;
3184 	struct sts_entry_24xx *sts;
3186 	scsi_qla_host_t *vha;
3187 	struct qla_hw_data *ha;
3188 	struct req_que *req;
3189 	struct rsp_que *rsp;
3190 	struct qla_qpair *qpair;
3196 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3197 	    "Entered %s.\n", __func__);
	/* Virtual ports with their own qpair use that qpair's queues. */
3199 	if (vha->vp_idx && vha->qpair) {
3208 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3210 		ql_log(ql_log_warn, vha, 0x1093,
3211 		    "Failed to allocate task management IOCB.\n");
3212 		return QLA_MEMORY_ALLOC_FAILED;
3215 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3216 	tsk->p.tsk.entry_count = 1;
3217 	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3218 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3219 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3220 	tsk->p.tsk.control_flags = cpu_to_le32(type);
3221 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3222 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3223 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3224 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
	/* LUN resets carry the LUN in FCP (big-endian) byte order. */
3225 	if (type == TCF_LUN_RESET) {
3226 		int_to_scsilun(l, &tsk->p.tsk.lun);
3227 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3228 		    sizeof(tsk->p.tsk.lun));
3232 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3233 	if (rval != QLA_SUCCESS) {
3234 		ql_dbg(ql_dbg_mbx, vha, 0x1094,
3235 		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
3236 	} else if (sts->entry_status != 0) {
3237 		ql_dbg(ql_dbg_mbx, vha, 0x1095,
3238 		    "Failed to complete IOCB -- error status (%x).\n",
3240 		rval = QLA_FUNCTION_FAILED;
3241 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3242 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
3243 		    "Failed to complete IOCB -- completion status (%x).\n",
3244 		    le16_to_cpu(sts->comp_status));
3245 		rval = QLA_FUNCTION_FAILED;
	/* Check FCP response info (byte 3) when the target supplied it. */
3246 	} else if (le16_to_cpu(sts->scsi_status) &
3247 	    SS_RESPONSE_INFO_LEN_VALID) {
3248 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
3249 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3250 			    "Ignoring inconsistent data length -- not enough "
3251 			    "response info (%d).\n",
3252 			    le32_to_cpu(sts->rsp_data_len));
3253 		} else if (sts->data[3]) {
3254 			ql_dbg(ql_dbg_mbx, vha, 0x1098,
3255 			    "Failed to complete IOCB -- response (%x).\n",
3257 			rval = QLA_FUNCTION_FAILED;
3261 	/* Issue marker IOCB. */
3262 	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3263 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3264 	if (rval2 != QLA_SUCCESS) {
3265 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
3266 		    "Failed to issue marker IOCB (%x).\n", rval2);
3268 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3269 		    "Done %s.\n", __func__);
3272 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * qla24xx_abort_target
 *	Target reset entry point: use the async TM path when enabled and
 *	the chip is FWI2-capable, otherwise issue a synchronous TMF IOCB.
 */
3278 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3280 	struct qla_hw_data *ha = fcport->vha->hw;
3282 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3283 		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3285 	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * qla24xx_lun_reset
 *	LUN reset entry point: mirrors qla24xx_abort_target() but issues
 *	TCF_LUN_RESET for the given LUN 'l'.
 */
3289 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3291 	struct qla_hw_data *ha = fcport->vha->hw;
3293 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3294 		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3296 	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * qla2x00_system_error
 *	Issue MBC_GEN_SYSTEM_ERROR to deliberately trigger a firmware
 *	system error (diagnostic aid).  Only supported on 23xx and
 *	FWI2-capable chips.
 */
3300 qla2x00_system_error(scsi_qla_host_t *vha)
3304 	mbx_cmd_t *mcp = &mc;
3305 	struct qla_hw_data *ha = vha->hw;
3307 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3308 		return QLA_FUNCTION_FAILED;
3310 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3311 	    "Entered %s.\n", __func__);
3313 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3314 	mcp->out_mb = MBX_0;
3318 	rval = qla2x00_mailbox_command(vha, mcp);
3320 	if (rval != QLA_SUCCESS) {
3321 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3323 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3324 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_serdes_word
 *	Issue MBC_WRITE_SERDES to write one SerDes register word at
 *	'addr'.  Supported on 25xx/2031/27xx only; 2031 parts take only
 *	the low data byte.
 */
3331 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3335 	mbx_cmd_t *mcp = &mc;
3337 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3338 	    !IS_QLA27XX(vha->hw))
3339 		return QLA_FUNCTION_FAILED;
3341 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3342 	    "Entered %s.\n", __func__);
3344 	mcp->mb[0] = MBC_WRITE_SERDES;
3346 	if (IS_QLA2031(vha->hw))
3347 		mcp->mb[2] = data & 0xff;
3352 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3354 	mcp->tov = MBX_TOV_SECONDS;
3356 	rval = qla2x00_mailbox_command(vha, mcp);
3358 	if (rval != QLA_SUCCESS) {
3359 		ql_dbg(ql_dbg_mbx, vha, 0x1183,
3360 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3362 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3363 		    "Done %s.\n", __func__);
/*
 * qla2x00_read_serdes_word
 *	Issue MBC_READ_SERDES to read one SerDes register word at 'addr'
 *	into *data.  Supported on 25xx/2031/27xx; 2031 parts return only
 *	the low byte.  *data is written even on mailbox failure.
 */
3370 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3374 	mbx_cmd_t *mcp = &mc;
3376 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3377 	    !IS_QLA27XX(vha->hw))
3378 		return QLA_FUNCTION_FAILED;
3380 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3381 	    "Entered %s.\n", __func__);
3383 	mcp->mb[0] = MBC_READ_SERDES;
3386 	mcp->out_mb = MBX_3|MBX_1|MBX_0;
3387 	mcp->in_mb = MBX_1|MBX_0;
3388 	mcp->tov = MBX_TOV_SECONDS;
3390 	rval = qla2x00_mailbox_command(vha, mcp);
3392 	if (IS_QLA2031(vha->hw))
3393 		*data = mcp->mb[1] & 0xff;
3397 	if (rval != QLA_SUCCESS) {
3398 		ql_dbg(ql_dbg_mbx, vha, 0x1186,
3399 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3401 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3402 		    "Done %s.\n", __func__);
/*
 * qla8044_write_serdes_word
 *	ISP8044-only variant: issue MBC_SET_GET_ETH_SERDES_REG with
 *	HCS_WRITE_SERDES to write a 32-bit value to a 32-bit SerDes
 *	register address.
 */
3409 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3413 	mbx_cmd_t *mcp = &mc;
3415 	if (!IS_QLA8044(vha->hw))
3416 		return QLA_FUNCTION_FAILED;
3418 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3419 	    "Entered %s.\n", __func__);
3421 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3422 	mcp->mb[1] = HCS_WRITE_SERDES;
3423 	mcp->mb[3] = LSW(addr);
3424 	mcp->mb[4] = MSW(addr);
3425 	mcp->mb[5] = LSW(data);
3426 	mcp->mb[6] = MSW(data);
3427 	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3429 	mcp->tov = MBX_TOV_SECONDS;
3431 	rval = qla2x00_mailbox_command(vha, mcp);
3433 	if (rval != QLA_SUCCESS) {
3434 		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3435 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3437 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3438 		    "Done %s.\n", __func__);
/*
 * qla8044_read_serdes_word
 *	ISP8044-only variant: issue MBC_SET_GET_ETH_SERDES_REG with
 *	HCS_READ_SERDES and assemble the 32-bit result from mb[2]:mb[1].
 *	*data is written even on mailbox failure.
 */
3445 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3449 	mbx_cmd_t *mcp = &mc;
3451 	if (!IS_QLA8044(vha->hw))
3452 		return QLA_FUNCTION_FAILED;
3454 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3455 	    "Entered %s.\n", __func__);
3457 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3458 	mcp->mb[1] = HCS_READ_SERDES;
3459 	mcp->mb[3] = LSW(addr);
3460 	mcp->mb[4] = MSW(addr);
3461 	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3462 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
3463 	mcp->tov = MBX_TOV_SECONDS;
3465 	rval = qla2x00_mailbox_command(vha, mcp);
3467 	*data = mcp->mb[2] << 16 | mcp->mb[1];
3469 	if (rval != QLA_SUCCESS) {
3470 		ql_dbg(ql_dbg_mbx, vha, 0x118a,
3471 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3473 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3474 		    "Done %s.\n", __func__);
3481 * qla2x00_set_serdes_params() -
3483 * @sw_em_1g: serial link options
3484 * @sw_em_2g: serial link options
3485 * @sw_em_4g: serial link options
/*
 * qla2x00_set_serdes_params
 *	Issue MBC_SERDES_PARAMS to program serial-link emphasis/swing
 *	options for 1G/2G/4G rates; BIT_15 is OR'd into each value as a
 *	"valid" flag for the firmware.
 */
3490 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3491     uint16_t sw_em_2g, uint16_t sw_em_4g)
3495 	mbx_cmd_t *mcp = &mc;
3497 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3498 	    "Entered %s.\n", __func__);
3500 	mcp->mb[0] = MBC_SERDES_PARAMS;
3502 	mcp->mb[2] = sw_em_1g | BIT_15;
3503 	mcp->mb[3] = sw_em_2g | BIT_15;
3504 	mcp->mb[4] = sw_em_4g | BIT_15;
3505 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3507 	mcp->tov = MBX_TOV_SECONDS;
3509 	rval = qla2x00_mailbox_command(vha, mcp);
3511 	if (rval != QLA_SUCCESS) {
3513 		ql_dbg(ql_dbg_mbx, vha, 0x109f,
3514 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3517 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3518 		    "Done %s.\n", __func__);
/*
 * qla2x00_stop_firmware
 *	Issue MBC_STOP_FIRMWARE (FWI2-capable chips only).  A firmware
 *	response of MBS_INVALID_COMMAND is mapped to QLA_INVALID_COMMAND.
 */
3525 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3529 	mbx_cmd_t *mcp = &mc;
3531 	if (!IS_FWI2_CAPABLE(vha->hw))
3532 		return QLA_FUNCTION_FAILED;
3534 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3535 	    "Entered %s.\n", __func__);
3537 	mcp->mb[0] = MBC_STOP_FIRMWARE;
3539 	mcp->out_mb = MBX_1|MBX_0;
3543 	rval = qla2x00_mailbox_command(vha, mcp);
3545 	if (rval != QLA_SUCCESS) {
3546 		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3547 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
3548 			rval = QLA_INVALID_COMMAND;
3550 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3551 		    "Done %s.\n", __func__);
/*
 * qla2x00_enable_eft_trace
 *	Issue MBC_TRACE_CONTROL/TC_EFT_ENABLE to start the extended
 *	firmware trace into the eft_dma buffer ('buffers' trace buffers,
 *	AENs disabled).  FWI2-capable chips only; bails out early if the
 *	PCI channel is offline.
 */
3558 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3563 	mbx_cmd_t *mcp = &mc;
3565 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3566 	    "Entered %s.\n", __func__);
3568 	if (!IS_FWI2_CAPABLE(vha->hw))
3569 		return QLA_FUNCTION_FAILED;
3571 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3572 		return QLA_FUNCTION_FAILED;
3574 	mcp->mb[0] = MBC_TRACE_CONTROL;
3575 	mcp->mb[1] = TC_EFT_ENABLE;
3576 	mcp->mb[2] = LSW(eft_dma);
3577 	mcp->mb[3] = MSW(eft_dma);
3578 	mcp->mb[4] = LSW(MSD(eft_dma));
3579 	mcp->mb[5] = MSW(MSD(eft_dma));
3580 	mcp->mb[6] = buffers;
3581 	mcp->mb[7] = TC_AEN_DISABLE;
3582 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3583 	mcp->in_mb = MBX_1|MBX_0;
3584 	mcp->tov = MBX_TOV_SECONDS;
3586 	rval = qla2x00_mailbox_command(vha, mcp);
3587 	if (rval != QLA_SUCCESS) {
3588 		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3589 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3590 		    rval, mcp->mb[0], mcp->mb[1]);
3592 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3593 		    "Done %s.\n", __func__);
/*
 * qla2x00_disable_eft_trace
 *	Issue MBC_TRACE_CONTROL/TC_EFT_DISABLE to stop the extended
 *	firmware trace.  Same chip/PCI-state preconditions as the
 *	enable path.
 */
3600 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3604 	mbx_cmd_t *mcp = &mc;
3606 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3607 	    "Entered %s.\n", __func__);
3609 	if (!IS_FWI2_CAPABLE(vha->hw))
3610 		return QLA_FUNCTION_FAILED;
3612 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3613 		return QLA_FUNCTION_FAILED;
3615 	mcp->mb[0] = MBC_TRACE_CONTROL;
3616 	mcp->mb[1] = TC_EFT_DISABLE;
3617 	mcp->out_mb = MBX_1|MBX_0;
3618 	mcp->in_mb = MBX_1|MBX_0;
3619 	mcp->tov = MBX_TOV_SECONDS;
3621 	rval = qla2x00_mailbox_command(vha, mcp);
3622 	if (rval != QLA_SUCCESS) {
3623 		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3624 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3625 		    rval, mcp->mb[0], mcp->mb[1]);
3627 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3628 		    "Done %s.\n", __func__);
/*
 * qla2x00_enable_fce_trace
 *	Issue MBC_TRACE_CONTROL/TC_FCE_ENABLE to start Fibre Channel
 *	Event tracing into fce_dma with default RX/TX sizes.  On success
 *	the first 8 returned mailbox registers are copied to 'mb'.
 *	Supported on 25xx/81xx/83xx/27xx only.
 */
3635 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3636     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3640 	mbx_cmd_t *mcp = &mc;
3642 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3643 	    "Entered %s.\n", __func__);
3645 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3646 	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3647 		return QLA_FUNCTION_FAILED;
3649 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3650 		return QLA_FUNCTION_FAILED;
3652 	mcp->mb[0] = MBC_TRACE_CONTROL;
3653 	mcp->mb[1] = TC_FCE_ENABLE;
3654 	mcp->mb[2] = LSW(fce_dma);
3655 	mcp->mb[3] = MSW(fce_dma);
3656 	mcp->mb[4] = LSW(MSD(fce_dma));
3657 	mcp->mb[5] = MSW(MSD(fce_dma));
3658 	mcp->mb[6] = buffers;
3659 	mcp->mb[7] = TC_AEN_DISABLE;
3661 	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3662 	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3663 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3665 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3666 	mcp->tov = MBX_TOV_SECONDS;
3668 	rval = qla2x00_mailbox_command(vha, mcp);
3669 	if (rval != QLA_SUCCESS) {
3670 		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3671 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3672 		    rval, mcp->mb[0], mcp->mb[1]);
3674 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3675 		    "Done %s.\n", __func__);
	/* Return the raw mailbox words to the caller. */
3678 		memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * qla2x00_disable_fce_trace
 *	Issue MBC_TRACE_CONTROL/TC_FCE_DISABLE to stop FCE tracing.  On
 *	success the 64-bit write pointer (*wr, from mb[5..2]) and read
 *	pointer (*rd, from mb[9..6]) are returned when the caller passed
 *	non-NULL pointers.
 */
3687 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3691 	mbx_cmd_t *mcp = &mc;
3693 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3694 	    "Entered %s.\n", __func__);
3696 	if (!IS_FWI2_CAPABLE(vha->hw))
3697 		return QLA_FUNCTION_FAILED;
3699 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3700 		return QLA_FUNCTION_FAILED;
3702 	mcp->mb[0] = MBC_TRACE_CONTROL;
3703 	mcp->mb[1] = TC_FCE_DISABLE;
3704 	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3705 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
3706 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3708 	mcp->tov = MBX_TOV_SECONDS;
3710 	rval = qla2x00_mailbox_command(vha, mcp);
3711 	if (rval != QLA_SUCCESS) {
3712 		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3713 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3714 		    rval, mcp->mb[0], mcp->mb[1]);
3716 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3717 		    "Done %s.\n", __func__);
	/* Assemble 64-bit trace pointers from four 16-bit mailbox words. */
3720 			*wr = (uint64_t) mcp->mb[5] << 48 |
3721 			    (uint64_t) mcp->mb[4] << 32 |
3722 			    (uint64_t) mcp->mb[3] << 16 |
3723 			    (uint64_t) mcp->mb[2];
3725 			*rd = (uint64_t) mcp->mb[9] << 48 |
3726 			    (uint64_t) mcp->mb[8] << 32 |
3727 			    (uint64_t) mcp->mb[7] << 16 |
3728 			    (uint64_t) mcp->mb[6];
/*
 * qla2x00_get_idma_speed
 *	Issue MBC_PORT_PARAMS with zeroed mode/speed to query the current
 *	iIDMA port speed for loop_id.  On success *port_speed receives
 *	mb[3]; 'mb' receives raw mailbox statuses.  iIDMA-capable chips
 *	only.
 */
3735 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3736     uint16_t *port_speed, uint16_t *mb)
3740 	mbx_cmd_t *mcp = &mc;
3742 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3743 	    "Entered %s.\n", __func__);
3745 	if (!IS_IIDMA_CAPABLE(vha->hw))
3746 		return QLA_FUNCTION_FAILED;
3748 	mcp->mb[0] = MBC_PORT_PARAMS;
3749 	mcp->mb[1] = loop_id;
	/* mb[2]=0 selects "get" mode; mb[3]=0 requests current speed. */
3750 	mcp->mb[2] = mcp->mb[3] = 0;
3751 	mcp->mb[9] = vha->vp_idx;
3752 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3753 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3754 	mcp->tov = MBX_TOV_SECONDS;
3756 	rval = qla2x00_mailbox_command(vha, mcp);
3758 	/* Return mailbox statuses. */
3765 	if (rval != QLA_SUCCESS) {
3766 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3768 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3769 		    "Done %s.\n", __func__);
3771 		*port_speed = mcp->mb[3];
/*
 * qla2x00_set_idma_speed
 *	Issue MBC_PORT_PARAMS to set the iIDMA port speed for loop_id;
 *	only the low six speed bits of port_speed are passed through.
 *	'mb' receives raw mailbox statuses.  iIDMA-capable chips only.
 */
3778 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3779     uint16_t port_speed, uint16_t *mb)
3783 	mbx_cmd_t *mcp = &mc;
3785 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3786 	    "Entered %s.\n", __func__);
3788 	if (!IS_IIDMA_CAPABLE(vha->hw))
3789 		return QLA_FUNCTION_FAILED;
3791 	mcp->mb[0] = MBC_PORT_PARAMS;
3792 	mcp->mb[1] = loop_id;
3794 	mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3795 	mcp->mb[9] = vha->vp_idx;
3796 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3797 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3798 	mcp->tov = MBX_TOV_SECONDS;
3800 	rval = qla2x00_mailbox_command(vha, mcp);
3802 	/* Return mailbox statuses. */
3809 	if (rval != QLA_SUCCESS) {
3810 		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3811 		    "Failed=%x.\n", rval);
3813 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3814 		    "Done %s.\n", __func__);
/*
 * qla24xx_report_id_acquisition
 *	Handle a Report-ID-Acquisition (RIDA) IOCB from the firmware.
 *	Dispatches on rptid_entry->format:
 *	  0 - loop (NL) topology: record primary port ID.
 *	  1 - fabric/point-to-point: update topology, handle N2N WWPN
 *	      comparison and login scheduling, FA-WWN, and VP ID
 *	      acquisition for virtual ports.
 *	  2 - N2N direct connect: record port ID, mark sessions for
 *	      rescan, and schedule login work.
 *	Runs in response-queue context, so heavier work is deferred to
 *	the DPC thread via dpc_flags bits.
 */
3821 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3822     struct vp_rpt_id_entry_24xx *rptid_entry)
3824 	struct qla_hw_data *ha = vha->hw;
3825 	scsi_qla_host_t *vp = NULL;
3826 	unsigned long flags;
3829 	struct fc_port *fcport;
3831 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3832 	    "Entered %s.\n", __func__);
	/* Ignore entries the firmware flagged as erroneous. */
3834 	if (rptid_entry->entry_status != 0)
3837 	id.b.domain = rptid_entry->port_id[2];
3838 	id.b.area = rptid_entry->port_id[1];
3839 	id.b.al_pa = rptid_entry->port_id[0];
3841 	ha->flags.n2n_ae = 0;
3843 	if (rptid_entry->format == 0) {
3845 		ql_dbg(ql_dbg_async, vha, 0x10b7,
3846 		    "Format 0 : Number of VPs setup %d, number of "
3847 		    "VPs acquired %d.\n", rptid_entry->vp_setup,
3848 		    rptid_entry->vp_acquired);
3849 		ql_dbg(ql_dbg_async, vha, 0x10b8,
3850 		    "Primary port id %02x%02x%02x.\n",
3851 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3852 		    rptid_entry->port_id[0]);
3853 		ha->current_topology = ISP_CFG_NL;
3854 		qlt_update_host_map(vha, id);
3856 	} else if (rptid_entry->format == 1) {
3858 		ql_dbg(ql_dbg_async, vha, 0x10b9,
3859 		    "Format 1: VP[%d] enabled - status %d - with "
3860 		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3861 		    rptid_entry->vp_status,
3862 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3863 		    rptid_entry->port_id[0]);
3864 		ql_dbg(ql_dbg_async, vha, 0x5075,
3865 		    "Format 1: Remote WWPN %8phC.\n",
3866 		    rptid_entry->u.f1.port_name);
3868 		ql_dbg(ql_dbg_async, vha, 0x5075,
3869 		    "Format 1: WWPN %8phC.\n",
3872 		switch (rptid_entry->u.f1.flags & TOPO_MASK) {
3874 			ha->current_topology = ISP_CFG_N;
			/* N2N: look up the peer by WWPN under the session lock. */
3875 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3876 			fcport = qla2x00_find_fcport_by_wwpn(vha,
3877 			    rptid_entry->u.f1.port_name, 1);
3878 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3881 				fcport->plogi_nack_done_deadline = jiffies + HZ;
3882 				fcport->dm_login_expire = jiffies + 3*HZ;
3883 				fcport->scan_state = QLA_FCPORT_FOUND;
3884 				switch (fcport->disc_state) {
3886 					set_bit(RELOGIN_NEEDED,
3889 				case DSC_DELETE_PEND:
3892 					qlt_schedule_sess_for_deletion(fcport);
			/* Higher WWPN takes al_pa 1 and initiates the login. */
3897 				if (wwn_to_u64(vha->port_name) >
3898 				    wwn_to_u64(rptid_entry->u.f1.port_name)) {
3900 					vha->d_id.b.al_pa = 1;
3901 					ha->flags.n2n_bigger = 1;
3904 				ql_dbg(ql_dbg_async, vha, 0x5075,
3905 				    "Format 1: assign local id %x remote id %x\n",
3906 				    vha->d_id.b24, id.b24);
3908 				ql_dbg(ql_dbg_async, vha, 0x5075,
3909 				    "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3910 				    rptid_entry->u.f1.port_name);
3911 				ha->flags.n2n_bigger = 0;
3913 				qla24xx_post_newsess_work(vha, &id,
3914 				    rptid_entry->u.f1.port_name,
3915 				    rptid_entry->u.f1.node_name,
3920 			/* if our portname is higher then initiate N2N login */
3922 			set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3923 			ha->flags.n2n_ae = 1;
3927 			ha->current_topology = ISP_CFG_FL;
3930 			ha->current_topology = ISP_CFG_F;
3936 			ha->flags.gpsc_supported = 1;
3937 			ha->current_topology = ISP_CFG_F;
3938 		/* buffer to buffer credit flag */
3939 		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3941 		if (rptid_entry->vp_idx == 0) {
3942 			if (rptid_entry->vp_status == VP_STAT_COMPL) {
3943 				/* FA-WWN is only for physical port */
3944 				if (qla_ini_mode_enabled(vha) &&
3945 				    ha->flags.fawwpn_enabled &&
3946 				    (rptid_entry->u.f1.flags &
3948 					memcpy(vha->port_name,
3949 					    rptid_entry->u.f1.port_name,
3953 				qlt_update_host_map(vha, id);
3956 			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3957 			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3959 			if (rptid_entry->vp_status != VP_STAT_COMPL &&
3960 			    rptid_entry->vp_status != VP_STAT_ID_CHG) {
3961 				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3962 				    "Could not acquire ID for VP[%d].\n",
3963 				    rptid_entry->vp_idx);
			/* Find the matching virtual port for this vp_idx. */
3968 			spin_lock_irqsave(&ha->vport_slock, flags);
3969 			list_for_each_entry(vp, &ha->vp_list, list) {
3970 				if (rptid_entry->vp_idx == vp->vp_idx) {
3975 			spin_unlock_irqrestore(&ha->vport_slock, flags);
3980 			qlt_update_host_map(vp, id);
3983 			 * Cannot configure here as we are still sitting on the
3984 			 * response queue. Handle it in dpc context.
3986 			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3987 			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3988 			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3990 		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3991 		qla2xxx_wake_dpc(vha);
3992 	} else if (rptid_entry->format == 2) {
3993 		ql_dbg(ql_dbg_async, vha, 0x505f,
3994 		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3995 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3996 		    rptid_entry->port_id[0]);
3998 		ql_dbg(ql_dbg_async, vha, 0x5075,
3999 		    "N2N: Remote WWPN %8phC.\n",
4000 		    rptid_entry->u.f2.port_name);
4002 		/* N2N.  direct connect */
4003 		ha->current_topology = ISP_CFG_N;
4004 		ha->flags.rida_fmt2 = 1;
4005 		vha->d_id.b.domain = rptid_entry->port_id[2];
4006 		vha->d_id.b.area = rptid_entry->port_id[1];
4007 		vha->d_id.b.al_pa = rptid_entry->port_id[0];
4009 		ha->flags.n2n_ae = 1;
4010 		spin_lock_irqsave(&ha->vport_slock, flags);
4011 		qlt_update_vp_map(vha, SET_AL_PA);
4012 		spin_unlock_irqrestore(&ha->vport_slock, flags);
		/* Mark all sessions for rescan; the peer will be re-found. */
4014 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
4015 			fcport->scan_state = QLA_FCPORT_SCAN;
4018 		fcport = qla2x00_find_fcport_by_wwpn(vha,
4019 		    rptid_entry->u.f2.port_name, 1);
4022 			fcport->login_retry = vha->hw->login_retry_count;
4023 			fcport->plogi_nack_done_deadline = jiffies + HZ;
4024 			fcport->scan_state = QLA_FCPORT_FOUND;
4030 * qla24xx_modify_vp_config
4031 * Change VP configuration for vha
4034 * vha = adapter block pointer.
4037 * qla2xxx local function return status code.
/*
 * qla24xx_modify_vp_config
 *	Build and issue a VP-config IOCB (on the base/physical vha) to
 *	modify/enable this virtual port's configuration with its node and
 *	port names.  On success the fc_vport state is moved to
 *	FC_VPORT_INITIALIZING.
 *
 * Returns:
 *	QLA_SUCCESS / QLA_MEMORY_ALLOC_FAILED / QLA_FUNCTION_FAILED /
 *	IOCB issue status.
 */
4043 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4046 	struct vp_config_entry_24xx *vpmod;
4047 	dma_addr_t vpmod_dma;
4048 	struct qla_hw_data *ha = vha->hw;
4049 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4051 	/* This can be called by the parent */
4053 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4054 	    "Entered %s.\n", __func__);
4056 	vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4058 		ql_log(ql_log_warn, vha, 0x10bc,
4059 		    "Failed to allocate modify VP IOCB.\n");
4060 		return QLA_MEMORY_ALLOC_FAILED;
4063 	vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4064 	vpmod->entry_count = 1;
4065 	vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4066 	vpmod->vp_count = 1;
4067 	vpmod->vp_index1 = vha->vp_idx;
4068 	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4070 	qlt_modify_vp_config(vha, vpmod);
4072 	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4073 	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4074 	vpmod->entry_count = 1;
	/* The IOCB must be issued through the physical (base) port. */
4076 	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4077 	if (rval != QLA_SUCCESS) {
4078 		ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4079 		    "Failed to issue VP config IOCB (%x).\n", rval);
4080 	} else if (vpmod->comp_status != 0) {
4081 		ql_dbg(ql_dbg_mbx, vha, 0x10be,
4082 		    "Failed to complete IOCB -- error status (%x).\n",
4083 		    vpmod->comp_status);
4084 		rval = QLA_FUNCTION_FAILED;
	/*
	 * NOTE(review): the preceding "comp_status != 0" test already
	 * catches every non-zero value, so this CS_COMPLETE comparison
	 * appears unreachable when CS_COMPLETE == 0 -- confirm intent.
	 */
4085 	} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4086 		ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4087 		    "Failed to complete IOCB -- completion status (%x).\n",
4088 		    le16_to_cpu(vpmod->comp_status));
4089 		rval = QLA_FUNCTION_FAILED;
4092 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4093 		    "Done %s.\n", __func__);
4094 		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4096 	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4102 * qla2x00_send_change_request
4103 * Receive or disable RSCN request from fabric controller
4106 * ha = adapter block pointer
4107 * format = registration format:
4109 * 1 - Fabric detected registration
4110 * 2 - N_port detected registration
4111 * 3 - Full registration
4112 * FF - clear registration
4113 * vp_idx = Virtual port index
4116 * qla2x00 local function return status code.
/*
 * qla2x00_send_change_request() - register for / clear RSCN change
 * notifications from the fabric controller via the SEND CHANGE REQUEST
 * mailbox command. 'format' selects the registration type (see header
 * comment above); mb[9] carries the vport index.
 */
4123 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4128 mbx_cmd_t *mcp = &mc;
4130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4131 "Entered %s.\n", __func__);
4133 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4134 mcp->mb[1] = format;
4135 mcp->mb[9] = vp_idx;
4136 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4137 mcp->in_mb = MBX_0|MBX_1;
4138 mcp->tov = MBX_TOV_SECONDS;
4140 rval = qla2x00_mailbox_command(vha, mcp);
/* Mailbox transport succeeded; also verify the firmware completion code. */
4142 if (rval == QLA_SUCCESS) {
4143 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * qla2x00_dump_ram() - dump RISC RAM starting at 'addr' into the host
 * buffer at 'req_dma'. Uses the extended form of the command when the
 * address needs more than 16 bits or the ISP is FWI2-capable; otherwise
 * falls back to the legacy DUMP RISC RAM command with a 16-bit size.
 */
4153 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4158 mbx_cmd_t *mcp = &mc;
4160 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4161 "Entered %s.\n", __func__);
/* Extended command: mb[8] carries the high 16 bits of the RISC address. */
4163 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4164 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4165 mcp->mb[8] = MSW(addr);
4166 mcp->out_mb = MBX_8|MBX_0;
4168 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4169 mcp->out_mb = MBX_0;
4171 mcp->mb[1] = LSW(addr);
/* 64-bit host buffer address split across mb[2,3,6,7]. */
4172 mcp->mb[2] = MSW(req_dma);
4173 mcp->mb[3] = LSW(req_dma);
4174 mcp->mb[6] = MSW(MSD(req_dma));
4175 mcp->mb[7] = LSW(MSD(req_dma));
4176 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
/* FWI2 parts take a 32-bit transfer size; legacy parts only 16 bits. */
4177 if (IS_FWI2_CAPABLE(vha->hw)) {
4178 mcp->mb[4] = MSW(size);
4179 mcp->mb[5] = LSW(size);
4180 mcp->out_mb |= MBX_5|MBX_4;
4182 mcp->mb[4] = LSW(size);
4183 mcp->out_mb |= MBX_4;
4187 mcp->tov = MBX_TOV_SECONDS;
4189 rval = qla2x00_mailbox_command(vha, mcp);
4191 if (rval != QLA_SUCCESS) {
4192 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4193 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4195 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4196 "Done %s.\n", __func__);
4201 /* 84XX Support **************************************************************/
/*
 * IOCB buffer for qla84xx_verify_chip(): the request is sent and the
 * response is read back from the same DMA buffer. Accessed in the code
 * below as mn->p.req / mn->p.rsp — presumably req and rsp overlay each
 * other in a union 'p' (elided from this excerpt); TODO confirm.
 */
4203 struct cs84xx_mgmt_cmd {
4205 struct verify_chip_entry_84xx req;
4206 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip() - issue a VERIFY CHIP IOCB to the CS84xx, optionally
 * forcing a firmware update. On completion status CS_VCS_CHIP_FAILURE the
 * failure code is returned in status[1]. If a forced update fails, the
 * command is retried with VCO_DONT_UPDATE_FW set (retry loop boundaries are
 * elided from this excerpt). On success the operational firmware version is
 * cached under cs84xx->access_lock.
 */
4211 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4214 struct cs84xx_mgmt_cmd *mn;
4217 unsigned long flags;
4218 struct qla_hw_data *ha = vha->hw;
4220 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4221 "Entered %s.\n", __func__);
4223 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4225 return QLA_MEMORY_ALLOC_FAILED;
/* Force an update only when a new firmware image is pending. */
4229 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4230 /* Diagnostic firmware? */
4231 /* options |= MENLO_DIAG_FW; */
4232 /* We update the firmware with only one data sequence. */
4233 options |= VCO_END_OF_DATA;
4237 memset(mn, 0, sizeof(*mn));
4238 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4239 mn->p.req.entry_count = 1;
4240 mn->p.req.options = cpu_to_le16(options);
4242 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4243 "Dump of Verify Request.\n");
4244 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4245 (uint8_t *)mn, sizeof(*mn));
/* Long timeout (120s): a firmware update can take a while. */
4247 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4248 if (rval != QLA_SUCCESS) {
4249 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4250 "Failed to issue verify IOCB (%x).\n", rval);
4254 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4255 "Dump of Verify Response.\n");
4256 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4257 (uint8_t *)mn, sizeof(*mn));
4259 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
/* Failure code is only meaningful on CS_VCS_CHIP_FAILURE. */
4260 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4261 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4262 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4263 "cs=%x fc=%x.\n", status[0], status[1]);
4265 if (status[0] != CS_COMPLETE) {
4266 rval = QLA_FUNCTION_FAILED;
/* Update failed: retry once without updating the firmware. */
4267 if (!(options & VCO_DONT_UPDATE_FW)) {
4268 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4269 "Firmware update failed. Retrying "
4270 "without update firmware.\n");
4271 options |= VCO_DONT_UPDATE_FW;
4272 options &= ~VCO_FORCE_UPDATE;
4276 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4277 "Firmware updated to %x.\n",
4278 le32_to_cpu(mn->p.rsp.fw_ver));
4280 /* NOTE: we only update OP firmware. */
4281 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4282 ha->cs84xx->op_fw_version =
4283 le32_to_cpu(mn->p.rsp.fw_ver);
4284 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4290 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4292 if (rval != QLA_SUCCESS) {
4293 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4294 "Failed=%x.\n", rval);
4296 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4297 "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que() - initialize a request queue in firmware with the
 * INITIALIZE MULTIQ mailbox command. Programs the queue's DMA base, length,
 * paired rsp queue id, QoS, vport index and RID, zeroes the in/out pointer
 * shadows, and resets the hardware queue index registers (under
 * hardware_lock) before issuing the command.
 */
4304 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4307 unsigned long flags;
4309 mbx_cmd_t *mcp = &mc;
4310 struct qla_hw_data *ha = vha->hw;
/* No-op unless firmware is running. */
4312 if (!ha->flags.fw_started)
4315 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4316 "Entered %s.\n", __func__);
/* BIT_13: enable shadow registers where supported. */
4318 if (IS_SHADOW_REG_CAPABLE(ha))
4319 req->options |= BIT_13;
4321 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4322 mcp->mb[1] = req->options;
/* 64-bit queue base address in mb[2,3,6,7]. */
4323 mcp->mb[2] = MSW(LSD(req->dma));
4324 mcp->mb[3] = LSW(LSD(req->dma));
4325 mcp->mb[6] = MSW(MSD(req->dma));
4326 mcp->mb[7] = LSW(MSD(req->dma));
4327 mcp->mb[5] = req->length;
4329 mcp->mb[10] = req->rsp->id;
4330 mcp->mb[12] = req->qos;
4331 mcp->mb[11] = req->vp_idx;
4332 mcp->mb[13] = req->rid;
4333 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4336 mcp->mb[4] = req->id;
4337 /* que in ptr index */
4339 /* que out ptr index */
4340 mcp->mb[9] = *req->out_ptr = 0;
4341 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4342 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4344 mcp->flags = MBX_DMA_OUT;
/* Queue creation can be slow: double the usual mailbox timeout. */
4345 mcp->tov = MBX_TOV_SECONDS * 2;
4347 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4348 mcp->in_mb |= MBX_1;
4349 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4350 mcp->out_mb |= MBX_15;
4351 /* debug q create issue in SR-IOV */
4352 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Reset hardware queue indices unless BIT_0 (queue re-init) is set. */
4355 spin_lock_irqsave(&ha->hardware_lock, flags);
4356 if (!(req->options & BIT_0)) {
4357 WRT_REG_DWORD(req->req_q_in, 0);
4358 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4359 WRT_REG_DWORD(req->req_q_out, 0);
4361 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4363 rval = qla2x00_mailbox_command(vha, mcp);
4364 if (rval != QLA_SUCCESS) {
4365 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4366 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4369 "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que() - initialize a response queue in firmware with the
 * INITIALIZE MULTIQ mailbox command. Mirrors qla25xx_init_req_que() but
 * programs the rsp queue's DMA base, length, MSI-X vector and RID, and
 * resets the rsp queue index registers (under hardware_lock).
 */
4376 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4379 unsigned long flags;
4381 mbx_cmd_t *mcp = &mc;
4382 struct qla_hw_data *ha = vha->hw;
/* No-op unless firmware is running. */
4384 if (!ha->flags.fw_started)
4387 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4388 "Entered %s.\n", __func__);
/* BIT_13: enable shadow registers where supported. */
4390 if (IS_SHADOW_REG_CAPABLE(ha))
4391 rsp->options |= BIT_13;
4393 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4394 mcp->mb[1] = rsp->options;
/* 64-bit queue base address in mb[2,3,6,7]. */
4395 mcp->mb[2] = MSW(LSD(rsp->dma));
4396 mcp->mb[3] = LSW(LSD(rsp->dma));
4397 mcp->mb[6] = MSW(MSD(rsp->dma));
4398 mcp->mb[7] = LSW(MSD(rsp->dma));
4399 mcp->mb[5] = rsp->length;
/* MSI-X vector that will service this response queue. */
4400 mcp->mb[14] = rsp->msix->entry;
4401 mcp->mb[13] = rsp->rid;
4402 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4405 mcp->mb[4] = rsp->id;
4406 /* que in ptr index */
4407 mcp->mb[8] = *rsp->in_ptr = 0;
4408 /* que out ptr index */
4410 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4411 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4413 mcp->flags = MBX_DMA_OUT;
/* Queue creation can be slow: double the usual mailbox timeout. */
4414 mcp->tov = MBX_TOV_SECONDS * 2;
4416 if (IS_QLA81XX(ha)) {
4417 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4418 mcp->in_mb |= MBX_1;
4419 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4420 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4421 mcp->in_mb |= MBX_1;
4422 /* debug q create issue in SR-IOV */
4423 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Reset hardware queue indices unless BIT_0 (queue re-init) is set. */
4426 spin_lock_irqsave(&ha->hardware_lock, flags);
4427 if (!(rsp->options & BIT_0)) {
4428 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4429 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4430 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4433 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4435 rval = qla2x00_mailbox_command(vha, mcp);
4436 if (rval != QLA_SUCCESS) {
4437 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4438 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4440 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4441 "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack() - acknowledge an Inter-Driver Communication (IDC)
 * notification by echoing the QLA_IDC_ACK_REGS mailbox values supplied in
 * 'mb' back to firmware via the IDC ACK command.
 */
4448 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4452 mbx_cmd_t *mcp = &mc;
4454 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4455 "Entered %s.\n", __func__);
4457 mcp->mb[0] = MBC_IDC_ACK;
/* Copy caller-provided ack registers into mb[1..QLA_IDC_ACK_REGS]. */
4458 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4459 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4461 mcp->tov = MBX_TOV_SECONDS;
4463 rval = qla2x00_mailbox_command(vha, mcp);
4465 if (rval != QLA_SUCCESS) {
4466 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4467 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4470 "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size() - query the flash sector size through the
 * Flash Access Control (FAC) mailbox command. Only valid on ISP81xx/83xx/
 * 27xx parts; the size is returned via *sector_size on success.
 */
4477 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4481 mbx_cmd_t *mcp = &mc;
4483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4484 "Entered %s.\n", __func__);
4486 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4487 !IS_QLA27XX(vha->hw))
4488 return QLA_FUNCTION_FAILED;
4490 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4491 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4492 mcp->out_mb = MBX_1|MBX_0;
4493 mcp->in_mb = MBX_1|MBX_0;
4494 mcp->tov = MBX_TOV_SECONDS;
4496 rval = qla2x00_mailbox_command(vha, mcp);
4498 if (rval != QLA_SUCCESS) {
4499 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4500 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4501 rval, mcp->mb[0], mcp->mb[1]);
4503 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4504 "Done %s.\n", __func__);
/* Sector size is returned by firmware in mb[1]. */
4505 *sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable() - toggle flash write access through the
 * Flash Access Control (FAC) mailbox command. 'enable' non-zero enables
 * writes; zero re-enables write protection. ISP81xx/83xx/27xx only.
 */
4512 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4516 mbx_cmd_t *mcp = &mc;
4518 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4519 !IS_QLA27XX(vha->hw))
4520 return QLA_FUNCTION_FAILED;
4522 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4523 "Entered %s.\n", __func__);
4525 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4526 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4527 FAC_OPT_CMD_WRITE_PROTECT;
4528 mcp->out_mb = MBX_1|MBX_0;
4529 mcp->in_mb = MBX_1|MBX_0;
4530 mcp->tov = MBX_TOV_SECONDS;
4532 rval = qla2x00_mailbox_command(vha, mcp);
4534 if (rval != QLA_SUCCESS) {
4535 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4536 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4537 rval, mcp->mb[0], mcp->mb[1]);
4539 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4540 "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector() - erase flash sectors in [start, finish] via
 * the Flash Access Control (FAC) mailbox command. Addresses are 32 bits,
 * split LSW/MSW across mb[2..5]. ISP81xx/83xx/27xx only.
 */
4547 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4551 mbx_cmd_t *mcp = &mc;
4553 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4554 !IS_QLA27XX(vha->hw))
4555 return QLA_FUNCTION_FAILED;
4557 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4558 "Entered %s.\n", __func__);
4560 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4561 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4562 mcp->mb[2] = LSW(start);
4563 mcp->mb[3] = MSW(start);
4564 mcp->mb[4] = LSW(finish);
4565 mcp->mb[5] = MSW(finish);
4566 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4567 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4568 mcp->tov = MBX_TOV_SECONDS;
4570 rval = qla2x00_mailbox_command(vha, mcp);
4572 if (rval != QLA_SUCCESS) {
4573 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4574 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4575 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4577 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4578 "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware() - ask firmware to restart the MPI
 * (management processor) firmware via the RESTART MPI FW mailbox command.
 */
4585 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4589 mbx_cmd_t *mcp = &mc;
4591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4592 "Entered %s.\n", __func__);
4594 mcp->mb[0] = MBC_RESTART_MPI_FW;
4595 mcp->out_mb = MBX_0;
4596 mcp->in_mb = MBX_0|MBX_1;
4597 mcp->tov = MBX_TOV_SECONDS;
4599 rval = qla2x00_mailbox_command(vha, mcp);
4601 if (rval != QLA_SUCCESS) {
4602 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4603 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4604 rval, mcp->mb[0], mcp->mb[1]);
4606 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4607 "Done %s.\n", __func__);
/*
 * qla82xx_set_driver_version() - report the driver version string to P3P
 * (82xx/8044) firmware via SET RNID PARAMS, packing the string two bytes
 * per mailbox register into mb[4..15] and zero-padding the remainder.
 */
4614 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4618 mbx_cmd_t *mcp = &mc;
4622 struct qla_hw_data *ha = vha->hw;
4624 if (!IS_P3P_TYPE(ha))
4625 return QLA_FUNCTION_FAILED;
4627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4628 "Entered %s.\n", __func__);
4630 str = (void *)version;
4631 len = strlen(version);
4633 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4634 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4635 mcp->out_mb = MBX_1|MBX_0;
/*
 * NOTE(review): 'len -= 2' with an odd-length version string skips zero
 * and the final cpu_to_le16p() would read one byte past the string —
 * presumably version strings here are even-length; TODO confirm.
 */
4636 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4637 mcp->mb[i] = cpu_to_le16p(str);
4638 mcp->out_mb |= 1<<i;
/* Zero-fill the remaining version mailbox registers. */
4640 for (; i < 16; i++) {
4642 mcp->out_mb |= 1<<i;
4644 mcp->in_mb = MBX_1|MBX_0;
4645 mcp->tov = MBX_TOV_SECONDS;
4647 rval = qla2x00_mailbox_command(vha, mcp);
4649 if (rval != QLA_SUCCESS) {
4650 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4651 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4653 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4654 "Done %s.\n", __func__);
/*
 * qla25xx_set_driver_version() - report the driver version string to
 * FWI2 firmware (excluding 24xx-type and 81xx) via SET RNID PARAMS using
 * a DMA buffer: a 4-byte header followed by the (possibly truncated)
 * version string, with mb[1] carrying the buffer length in dwords.
 */
4661 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4665 mbx_cmd_t *mcp = &mc;
4670 struct qla_hw_data *ha = vha->hw;
4672 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4674 return QLA_FUNCTION_FAILED;
4676 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4677 "Entered %s.\n", __func__);
4679 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4681 ql_log(ql_log_warn, vha, 0x117f,
4682 "Failed to allocate driver version param.\n");
4683 return QLA_MEMORY_ALLOC_FAILED;
/* 4-byte descriptor header preceding the version text. */
4686 memcpy(str, "\x7\x3\x11\x0", 4);
4688 len = dwlen * 4 - 4;
4689 memset(str + 4, 0, len);
/* Truncate to the available payload space if the version is longer. */
4690 if (len > strlen(version))
4691 len = strlen(version);
4692 memcpy(str + 4, version, len);
4694 mcp->mb[0] = MBC_SET_RNID_PARAMS;
/* High byte: RNID subtype; low byte: buffer length in dwords. */
4695 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4696 mcp->mb[2] = MSW(LSD(str_dma));
4697 mcp->mb[3] = LSW(LSD(str_dma));
4698 mcp->mb[6] = MSW(MSD(str_dma));
4699 mcp->mb[7] = LSW(MSD(str_dma));
4700 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4701 mcp->in_mb = MBX_1|MBX_0;
4702 mcp->tov = MBX_TOV_SECONDS;
4704 rval = qla2x00_mailbox_command(vha, mcp);
4706 if (rval != QLA_SUCCESS) {
4707 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4708 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4711 "Done %s.\n", __func__);
4714 dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * qla24xx_get_port_login_templ() - fetch the port login (PLOGI) payload
 * template from firmware via GET RNID PARAMS into 'buf' (DMA address
 * 'buf_dma', 'bufsiz' bytes), then byte-swap the returned dwords in place.
 */
4720 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4721 void *buf, uint16_t bufsiz)
4725 mbx_cmd_t *mcp = &mc;
4728 if (!IS_FWI2_CAPABLE(vha->hw))
4729 return QLA_FUNCTION_FAILED;
4731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4732 "Entered %s.\n", __func__);
4734 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4735 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4736 mcp->mb[2] = MSW(buf_dma);
4737 mcp->mb[3] = LSW(buf_dma);
4738 mcp->mb[6] = MSW(MSD(buf_dma));
4739 mcp->mb[7] = LSW(MSD(buf_dma));
/* Buffer size expressed in dwords. */
4740 mcp->mb[8] = bufsiz/4;
4741 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4742 mcp->in_mb = MBX_1|MBX_0;
4743 mcp->tov = MBX_TOV_SECONDS;
4745 rval = qla2x00_mailbox_command(vha, mcp);
4747 if (rval != QLA_SUCCESS) {
4748 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4749 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4751 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4752 "Done %s.\n", __func__);
/* Convert the little-endian template dwords to CPU order in place. */
4753 bp = (uint32_t *) buf;
4754 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4755 *bp = le32_to_cpu(*bp);
/*
 * qla2x00_read_asic_temperature() - read the ASIC die temperature from
 * firmware via GET RNID PARAMS (subtype ASIC_TEMP). FWI2-capable parts
 * only; the temperature comes back in mb[1] (assignment to *temp is in
 * lines elided from this excerpt).
 */
4762 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4766 mbx_cmd_t *mcp = &mc;
4768 if (!IS_FWI2_CAPABLE(vha->hw))
4769 return QLA_FUNCTION_FAILED;
4771 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4772 "Entered %s.\n", __func__);
4774 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4775 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4776 mcp->out_mb = MBX_1|MBX_0;
4777 mcp->in_mb = MBX_1|MBX_0;
4778 mcp->tov = MBX_TOV_SECONDS;
4780 rval = qla2x00_mailbox_command(vha, mcp);
4783 if (rval != QLA_SUCCESS) {
4784 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4785 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4787 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4788 "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp() - read 'len' bytes from an SFP transceiver register
 * page ('dev'/'off', options in 'opt') into the buffer at 'sfp_dma'.
 * MBS_COMMAND_ERROR with the "not present" subcode is mapped to
 * QLA_INTERFACE_ERROR so callers can tell "no SFP" from a real failure.
 */
4795 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4796 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4800 mbx_cmd_t *mcp = &mc;
4801 struct qla_hw_data *ha = vha->hw;
4803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4804 "Entered %s.\n", __func__);
4806 if (!IS_FWI2_CAPABLE(ha))
4807 return QLA_FUNCTION_FAILED;
4812 mcp->mb[0] = MBC_READ_SFP;
/* 64-bit destination buffer address in mb[2,3,6,7]. */
4814 mcp->mb[2] = MSW(sfp_dma);
4815 mcp->mb[3] = LSW(sfp_dma);
4816 mcp->mb[6] = MSW(MSD(sfp_dma));
4817 mcp->mb[7] = LSW(MSD(sfp_dma));
4821 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4822 mcp->in_mb = MBX_1|MBX_0;
4823 mcp->tov = MBX_TOV_SECONDS;
4825 rval = qla2x00_mailbox_command(vha, mcp);
4830 if (rval != QLA_SUCCESS) {
4831 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4832 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4833 if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4835 /* sfp is not there */
4836 rval = QLA_INTERFACE_ERROR;
4838 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4839 "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp() - write 'len' bytes from the buffer at 'sfp_dma' to
 * an SFP transceiver register page ('dev'/'off', options in 'opt').
 * Counterpart of qla2x00_read_sfp(); FWI2-capable parts only.
 */
4846 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4847 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4851 mbx_cmd_t *mcp = &mc;
4852 struct qla_hw_data *ha = vha->hw;
4854 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4855 "Entered %s.\n", __func__);
4857 if (!IS_FWI2_CAPABLE(ha))
4858 return QLA_FUNCTION_FAILED;
4866 mcp->mb[0] = MBC_WRITE_SFP;
/* 64-bit source buffer address in mb[2,3,6,7]. */
4868 mcp->mb[2] = MSW(sfp_dma);
4869 mcp->mb[3] = LSW(sfp_dma);
4870 mcp->mb[6] = MSW(MSD(sfp_dma));
4871 mcp->mb[7] = LSW(MSD(sfp_dma));
4875 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4876 mcp->in_mb = MBX_1|MBX_0;
4877 mcp->tov = MBX_TOV_SECONDS;
4879 rval = qla2x00_mailbox_command(vha, mcp);
4881 if (rval != QLA_SUCCESS) {
4882 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4883 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4886 "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats() - fetch 10GbE XGMAC statistics from a CNA into
 * the buffer at 'stats_dma'. Sizes are exchanged in dwords (hence the >>2
 * and <<2); the actual byte count returned by firmware is written to
 * *actual_size on success.
 */
4893 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4894 uint16_t size_in_bytes, uint16_t *actual_size)
4898 mbx_cmd_t *mcp = &mc;
4900 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4901 "Entered %s.\n", __func__);
4903 if (!IS_CNA_CAPABLE(vha->hw))
4904 return QLA_FUNCTION_FAILED;
4906 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4907 mcp->mb[2] = MSW(stats_dma);
4908 mcp->mb[3] = LSW(stats_dma);
4909 mcp->mb[6] = MSW(MSD(stats_dma));
4910 mcp->mb[7] = LSW(MSD(stats_dma));
/* Buffer size in dwords. */
4911 mcp->mb[8] = size_in_bytes >> 2;
4912 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4913 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4914 mcp->tov = MBX_TOV_SECONDS;
4916 rval = qla2x00_mailbox_command(vha, mcp);
4918 if (rval != QLA_SUCCESS) {
4919 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4920 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4921 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4923 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4924 "Done %s.\n", __func__);
/* mb[2] holds the returned length in dwords; convert back to bytes. */
4927 *actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params() - fetch the DCBX parameter TLV block from a
 * CNA into the buffer at 'tlv_dma' via the GET DCBX PARAMS mailbox
 * command.
 */
4934 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4939 mbx_cmd_t *mcp = &mc;
4941 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4942 "Entered %s.\n", __func__);
4944 if (!IS_CNA_CAPABLE(vha->hw))
4945 return QLA_FUNCTION_FAILED;
4947 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
/* 64-bit TLV buffer address in mb[2,3,6,7]. */
4949 mcp->mb[2] = MSW(tlv_dma);
4950 mcp->mb[3] = LSW(tlv_dma);
4951 mcp->mb[6] = MSW(MSD(tlv_dma));
4952 mcp->mb[7] = LSW(MSD(tlv_dma));
4954 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4955 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4956 mcp->tov = MBX_TOV_SECONDS;
4958 rval = qla2x00_mailbox_command(vha, mcp);
4960 if (rval != QLA_SUCCESS) {
4961 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4962 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4963 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4965 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4966 "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word() - read one 32-bit word from RISC RAM at
 * 'risc_addr' via READ RAM EXTENDED; the result (mb[3]:mb[2]) is
 * written to *data on success. FWI2-capable parts only.
 */
4973 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4977 mbx_cmd_t *mcp = &mc;
4979 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4980 "Entered %s.\n", __func__);
4982 if (!IS_FWI2_CAPABLE(vha->hw))
4983 return QLA_FUNCTION_FAILED;
4985 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
/* 32-bit RISC address split across mb[1] (low) and mb[8] (high). */
4986 mcp->mb[1] = LSW(risc_addr);
4987 mcp->mb[8] = MSW(risc_addr);
4988 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4989 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4992 rval = qla2x00_mailbox_command(vha, mcp);
4993 if (rval != QLA_SUCCESS) {
4994 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4995 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4997 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4998 "Done %s.\n", __func__);
/* Reassemble the 32-bit word from the two returned mailbox registers. */
4999 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test() - run the DIAGNOSTIC LOOP BACK mailbox command
 * using the send/receive DMA buffers, transfer size and iteration count in
 * 'mreq'. The raw returned mailbox registers (64 bytes) are copied to
 * 'mresp' for the caller to interpret.
 */
5006 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5011 mbx_cmd_t *mcp = &mc;
5013 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5014 "Entered %s.\n", __func__);
5016 memset(mcp->mb, 0 , sizeof(mcp->mb));
5017 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5018 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5020 /* transfer count */
5021 mcp->mb[10] = LSW(mreq->transfer_size);
5022 mcp->mb[11] = MSW(mreq->transfer_size);
5024 /* send data address */
5025 mcp->mb[14] = LSW(mreq->send_dma);
5026 mcp->mb[15] = MSW(mreq->send_dma);
5027 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5028 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5030 /* receive data address */
5031 mcp->mb[16] = LSW(mreq->rcv_dma);
5032 mcp->mb[17] = MSW(mreq->rcv_dma);
5033 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5034 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5036 /* Iteration count */
5037 mcp->mb[18] = LSW(mreq->iteration_count);
5038 mcp->mb[19] = MSW(mreq->iteration_count);
5040 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5041 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
/* CNAs additionally take mb[2] (set elsewhere / zeroed above). */
5042 if (IS_CNA_CAPABLE(vha->hw))
5043 mcp->out_mb |= MBX_2;
5044 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5046 mcp->buf_size = mreq->transfer_size;
5047 mcp->tov = MBX_TOV_SECONDS;
5048 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5050 rval = qla2x00_mailbox_command(vha, mcp);
5052 if (rval != QLA_SUCCESS) {
5053 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5054 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5055 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5056 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5058 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5059 "Done %s.\n", __func__);
5062 /* Copy mailbox information */
5063 memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test() - run the DIAGNOSTIC ECHO mailbox command: send the
 * buffer at mreq->send_dma and receive the echo into mreq->rcv_dma. The
 * raw returned mailbox registers (64 bytes) are copied to 'mresp'.
 */
5068 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5073 mbx_cmd_t *mcp = &mc;
5074 struct qla_hw_data *ha = vha->hw;
5076 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5077 "Entered %s.\n", __func__);
5079 memset(mcp->mb, 0 , sizeof(mcp->mb));
5080 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5081 /* BIT_6 specifies 64bit address */
5082 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
/* CNAs route the echo through a specific FCF index. */
5083 if (IS_CNA_CAPABLE(ha)) {
5084 mcp->mb[2] = vha->fcoe_fcf_idx;
5086 mcp->mb[16] = LSW(mreq->rcv_dma);
5087 mcp->mb[17] = MSW(mreq->rcv_dma);
5088 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5089 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5091 mcp->mb[10] = LSW(mreq->transfer_size);
5093 mcp->mb[14] = LSW(mreq->send_dma);
5094 mcp->mb[15] = MSW(mreq->send_dma);
5095 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5096 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5098 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5099 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5100 if (IS_CNA_CAPABLE(ha))
5101 mcp->out_mb |= MBX_2;
/* Extra status registers returned on newer / CNA parts. */
5104 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5105 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5106 mcp->in_mb |= MBX_1;
5107 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5108 mcp->in_mb |= MBX_3;
5110 mcp->tov = MBX_TOV_SECONDS;
5111 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5112 mcp->buf_size = mreq->transfer_size;
5114 rval = qla2x00_mailbox_command(vha, mcp);
5116 if (rval != QLA_SUCCESS) {
5117 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5118 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5119 rval, mcp->mb[0], mcp->mb[1]);
5121 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5122 "Done %s.\n", __func__);
5125 /* Copy mailbox information */
5126 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip() - reset the CS84xx chip via the ISP84XX RESET
 * mailbox command; 'enable_diagnostic' non-zero brings it back up in
 * diagnostic mode.
 */
5131 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5135 mbx_cmd_t *mcp = &mc;
5137 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5138 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5140 mcp->mb[0] = MBC_ISP84XX_RESET;
5141 mcp->mb[1] = enable_diagnostic;
5142 mcp->out_mb = MBX_1|MBX_0;
5143 mcp->in_mb = MBX_1|MBX_0;
5144 mcp->tov = MBX_TOV_SECONDS;
5145 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5146 rval = qla2x00_mailbox_command(vha, mcp);
5148 if (rval != QLA_SUCCESS)
5149 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5151 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5152 "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word() - write one 32-bit word 'data' to RISC RAM at
 * 'risc_addr' via WRITE RAM WORD EXTENDED. Counterpart of
 * qla2x00_read_ram_word(); FWI2-capable parts only.
 */
5158 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5162 mbx_cmd_t *mcp = &mc;
5164 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5165 "Entered %s.\n", __func__);
5167 if (!IS_FWI2_CAPABLE(vha->hw))
5168 return QLA_FUNCTION_FAILED;
5170 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
/* Address in mb[1]/mb[8] (low/high); data word in mb[2]/mb[3]. */
5171 mcp->mb[1] = LSW(risc_addr);
5172 mcp->mb[2] = LSW(data);
5173 mcp->mb[3] = MSW(data);
5174 mcp->mb[8] = MSW(risc_addr);
5175 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5179 rval = qla2x00_mailbox_command(vha, mcp);
5180 if (rval != QLA_SUCCESS) {
5181 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5182 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5184 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5185 "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register() - write MPI registers by banging the
 * mailbox registers directly and polling for the completion interrupt
 * (bypasses the normal qla2x00_mailbox_command() path — used when the
 * regular mailbox machinery is unavailable).
 *
 * NOTE(review): the "®->" sequences below are mojibake — almost
 * certainly "&reg->" corrupted through an HTML-entity pass. Left
 * byte-identical here; the file should be repaired against the original
 * source.
 */
5192 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5195 uint32_t stat, timer;
5197 struct qla_hw_data *ha = vha->hw;
5198 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5202 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5203 "Entered %s.\n", __func__);
5205 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5207 /* Write the MBC data to the registers */
5208 WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER);
5209 WRT_REG_WORD(®->mailbox1, mb[0]);
5210 WRT_REG_WORD(®->mailbox2, mb[1]);
5211 WRT_REG_WORD(®->mailbox3, mb[2]);
5212 WRT_REG_WORD(®->mailbox4, mb[3]);
/* Ring the doorbell: raise the host-to-RISC interrupt. */
5214 WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT);
5216 /* Poll for MBC interrupt */
5217 for (timer = 6000000; timer; timer--) {
5218 /* Check for pending interrupts. */
5219 stat = RD_REG_DWORD(®->host_status);
5220 if (stat & HSRX_RISC_INT) {
/* Mailbox-completion interrupt codes only; others are ignored here. */
5223 if (stat == 0x1 || stat == 0x2 ||
5224 stat == 0x10 || stat == 0x11) {
5225 set_bit(MBX_INTERRUPT,
5226 &ha->mbx_cmd_flags);
5227 mb0 = RD_REG_WORD(®->mailbox0);
5228 WRT_REG_DWORD(®->hccr,
5229 HCCRX_CLR_RISC_INT);
/* Read back to flush the posted write. */
5230 RD_REG_DWORD(®->hccr);
5237 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5238 rval = mb0 & MBS_MASK;
5240 rval = QLA_FUNCTION_FAILED;
5242 if (rval != QLA_SUCCESS) {
5243 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5244 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5246 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5247 "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate() - query the current link data rate via the
 * GET DATA RATE mailbox command and cache it in ha->link_data_rate
 * (unless firmware reports the "unknown" value 0x7).
 */
5254 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5258 mbx_cmd_t *mcp = &mc;
5259 struct qla_hw_data *ha = vha->hw;
5261 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5262 "Entered %s.\n", __func__);
5264 if (!IS_FWI2_CAPABLE(ha))
5265 return QLA_FUNCTION_FAILED;
5267 mcp->mb[0] = MBC_DATA_RATE;
5269 mcp->out_mb = MBX_1|MBX_0;
5270 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5271 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5272 mcp->in_mb |= MBX_3;
5273 mcp->tov = MBX_TOV_SECONDS;
5275 rval = qla2x00_mailbox_command(vha, mcp);
5276 if (rval != QLA_SUCCESS) {
5277 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5278 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5280 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5281 "Done %s.\n", __func__);
/* 0x7 = rate unknown/not yet established; keep the cached value. */
5282 if (mcp->mb[1] != 0x7)
5283 ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config() - read the four port-configuration mailbox
 * registers (mb[1..4]) into the caller's 'mb' array via GET PORT CONFIG.
 * Supported on 81xx/83xx/8044 (full chip list partly elided here).
 */
5290 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5294 mbx_cmd_t *mcp = &mc;
5295 struct qla_hw_data *ha = vha->hw;
5297 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5298 "Entered %s.\n", __func__);
5300 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5302 return QLA_FUNCTION_FAILED;
5303 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5304 mcp->out_mb = MBX_0;
5305 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5306 mcp->tov = MBX_TOV_SECONDS;
5309 rval = qla2x00_mailbox_command(vha, mcp);
5311 if (rval != QLA_SUCCESS) {
5312 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5313 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5315 /* Copy all bits to preserve original value */
5316 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5318 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5319 "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config() - write the four port-configuration mailbox
 * registers from the caller's 'mb' array via SET PORT CONFIG. Counterpart
 * of qla81xx_get_port_config().
 */
5325 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5329 mbx_cmd_t *mcp = &mc;
5331 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5332 "Entered %s.\n", __func__);
5334 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5335 /* Copy all bits to preserve original setting */
5336 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5337 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5339 mcp->tov = MBX_TOV_SECONDS;
5341 rval = qla2x00_mailbox_command(vha, mcp);
5343 if (rval != QLA_SUCCESS) {
5344 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5345 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5347 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5348 "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio() - set the FCP priority for the port at 'loop_id'
 * via the PORT PARAMS mailbox command. Only ISP24xx-type and 25xx parts
 * support FCP priority; the priority is masked to 4 bits.
 */
5355 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5360 mbx_cmd_t *mcp = &mc;
5361 struct qla_hw_data *ha = vha->hw;
5363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5364 "Entered %s.\n", __func__);
5366 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5367 return QLA_FUNCTION_FAILED;
5369 mcp->mb[0] = MBC_PORT_PARAMS;
5370 mcp->mb[1] = loop_id;
/* mb[2] selects set vs. get depending on fcp_prio_enabled (line elided). */
5371 if (ha->flags.fcp_prio_enabled)
5375 mcp->mb[4] = priority & 0xf;
5376 mcp->mb[9] = vha->vp_idx;
5377 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5378 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5381 rval = qla2x00_mailbox_command(vha, mcp);
5389 if (rval != QLA_SUCCESS) {
5390 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5392 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5393 "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp() - read the board temperature into *temp.
 * Dispatches by hardware type: specific 25xx subsystem IDs read the
 * thermal sensor over the SFP/I2C interface, 82xx/8044 use their own
 * register-based readers, and everything else falls through to the
 * firmware GET ASIC TEMP query. Returns QLA_FUNCTION_FAILED for parts
 * with no thermal support.
 */
5400 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5402 int rval = QLA_FUNCTION_FAILED;
5403 struct qla_hw_data *ha = vha->hw;
5406 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5407 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5408 "Thermal not supported by this card.\n");
/* 25xx: thermal sensor reachable only on known subsystem IDs. */
5412 if (IS_QLA25XX(ha)) {
5413 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5414 ha->pdev->subsystem_device == 0x0175) {
5415 rval = qla2x00_read_sfp(vha, 0, &byte,
5416 0x98, 0x1, 1, BIT_13|BIT_0);
5420 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5421 ha->pdev->subsystem_device == 0x338e) {
5422 rval = qla2x00_read_sfp(vha, 0, &byte,
5423 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5427 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5428 "Thermal not supported by this card.\n");
/* P3P parts read temperature from chip registers, not the mailbox. */
5432 if (IS_QLA82XX(ha)) {
5433 *temp = qla82xx_read_temperature(vha);
5436 } else if (IS_QLA8044(ha)) {
5437 *temp = qla8044_read_temperature(vha);
5442 rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable() - enable firmware interrupts via the TOGGLE
 * INTERRUPT mailbox command (the enable value in mb[1] is set on a line
 * elided from this excerpt).
 */
5447 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5450 struct qla_hw_data *ha = vha->hw;
5452 mbx_cmd_t *mcp = &mc;
5454 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5455 "Entered %s.\n", __func__);
5457 if (!IS_FWI2_CAPABLE(ha))
5458 return QLA_FUNCTION_FAILED;
5460 memset(mcp, 0, sizeof(mbx_cmd_t));
5461 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5464 mcp->out_mb = MBX_1|MBX_0;
5469 rval = qla2x00_mailbox_command(vha, mcp);
5470 if (rval != QLA_SUCCESS) {
5471 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5472 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5474 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5475 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable
 *	Counterpart of qla82xx_mbx_intr_enable(): ask the firmware to stop
 *	interrupt delivery via TOGGLE_INTERRUPT. Restricted to P3P-type
 *	(82xx/8044) adapters.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
5482 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5485 struct qla_hw_data *ha = vha->hw;
5487 mbx_cmd_t *mcp = &mc;
5489 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5490 "Entered %s.\n", __func__);
5492 if (!IS_P3P_TYPE(ha))
5493 return QLA_FUNCTION_FAILED;
/* Clear the whole request so unused mailbox registers go out as zero. */
5495 memset(mcp, 0, sizeof(mbx_cmd_t));
5496 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5499 mcp->out_mb = MBX_1|MBX_0;
5504 rval = qla2x00_mailbox_command(vha, mcp);
5505 if (rval != QLA_SUCCESS) {
5506 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5507 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5509 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5510 "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size
 *	Query the firmware for the size of the minidump template
 *	(DIAGNOSTIC_MINIDUMP_TEMPLATE, subcode RQST_TMPLT_SIZE).
 *	On success the 32-bit size (mb[3]:mb[2]) is cached in
 *	ha->md_template_size for the subsequent template fetch.
 *
 * Returns:
 *	QLA_SUCCESS, or QLA_FUNCTION_FAILED on mailbox error or when the
 *	firmware reports a zero-length template.
 */
5517 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5519 struct qla_hw_data *ha = vha->hw;
5521 mbx_cmd_t *mcp = &mc;
5522 int rval = QLA_FUNCTION_FAILED;
5524 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5525 "Entered %s.\n", __func__);
5527 memset(mcp->mb, 0 , sizeof(mcp->mb));
/* 32-bit command code and subcode are split LSW/MSW across registers. */
5528 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5529 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5530 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5531 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5533 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5534 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5535 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5537 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5538 mcp->tov = MBX_TOV_SECONDS;
5539 rval = qla2x00_mailbox_command(vha, mcp);
5541 /* Always copy back return mailbox values. */
5542 if (rval != QLA_SUCCESS) {
5543 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5544 "mailbox command FAILED=0x%x, subcode=%x.\n",
5545 (mcp->mb[1] << 16) | mcp->mb[0],
5546 (mcp->mb[3] << 16) | mcp->mb[2]);
5548 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5549 "Done %s.\n", __func__);
/* Template size comes back as MSW in mb[3], LSW in mb[2]. */
5550 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5551 if (!ha->md_template_size) {
5552 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5553 "Null template size obtained.\n");
5554 rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template
 *	Fetch the minidump template from firmware in one shot: allocate a
 *	coherent DMA buffer of ha->md_template_size bytes and ask the
 *	firmware to DMA the template (subcode RQST_TMPLT) into it.
 *	The buffer (ha->md_tmplt_hdr) is kept for later minidump capture;
 *	ownership/freeing is handled by the caller's teardown path.
 *
 * Returns:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED (allocation or mailbox error).
 */
5561 qla82xx_md_get_template(scsi_qla_host_t *vha)
5563 struct qla_hw_data *ha = vha->hw;
5565 mbx_cmd_t *mcp = &mc;
5566 int rval = QLA_FUNCTION_FAILED;
5568 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5569 "Entered %s.\n", __func__);
5571 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5572 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5573 if (!ha->md_tmplt_hdr) {
5574 ql_log(ql_log_warn, vha, 0x1124,
5575 "Unable to allocate memory for Minidump template.\n");
5579 memset(mcp->mb, 0 , sizeof(mcp->mb));
5580 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5581 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5582 mcp->mb[2] = LSW(RQST_TMPLT);
5583 mcp->mb[3] = MSW(RQST_TMPLT);
/* 64-bit DMA address is spread LSW-first across mb[4..7]. */
5584 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5585 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5586 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5587 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5588 mcp->mb[8] = LSW(ha->md_template_size);
5589 mcp->mb[9] = MSW(ha->md_template_size);
5591 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5592 mcp->tov = MBX_TOV_SECONDS;
5593 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5594 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5595 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5596 rval = qla2x00_mailbox_command(vha, mcp);
5598 if (rval != QLA_SUCCESS) {
5599 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5600 "mailbox command FAILED=0x%x, subcode=%x.\n",
5601 ((mcp->mb[1] << 16) | mcp->mb[0]),
5602 ((mcp->mb[3] << 16) | mcp->mb[2]));
5604 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5605 "Done %s.\n", __func__);
5610 qla8044_md_get_template(scsi_qla_host_t *vha)
5612 struct qla_hw_data *ha = vha->hw;
5614 mbx_cmd_t *mcp = &mc;
5615 int rval = QLA_FUNCTION_FAILED;
5616 int offset = 0, size = MINIDUMP_SIZE_36K;
5617 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5618 "Entered %s.\n", __func__);
5620 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5621 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5622 if (!ha->md_tmplt_hdr) {
5623 ql_log(ql_log_warn, vha, 0xb11b,
5624 "Unable to allocate memory for Minidump template.\n");
5628 memset(mcp->mb, 0 , sizeof(mcp->mb));
5629 while (offset < ha->md_template_size) {
5630 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5631 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5632 mcp->mb[2] = LSW(RQST_TMPLT);
5633 mcp->mb[3] = MSW(RQST_TMPLT);
5634 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5635 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5636 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5637 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5638 mcp->mb[8] = LSW(size);
5639 mcp->mb[9] = MSW(size);
5640 mcp->mb[10] = offset & 0x0000FFFF;
5641 mcp->mb[11] = offset & 0xFFFF0000;
5642 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5643 mcp->tov = MBX_TOV_SECONDS;
5644 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5645 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5646 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5647 rval = qla2x00_mailbox_command(vha, mcp);
5649 if (rval != QLA_SUCCESS) {
5650 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5651 "mailbox command FAILED=0x%x, subcode=%x.\n",
5652 ((mcp->mb[1] << 16) | mcp->mb[0]),
5653 ((mcp->mb[3] << 16) | mcp->mb[2]));
5656 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5657 "Done %s.\n", __func__);
5658 offset = offset + size;
/*
 * qla81xx_set_led_config
 *	Program the LED configuration registers via MBC_SET_LED_CONFIG.
 *	ISP81xx takes two config words (led_cfg[0..1]); ISP8031 takes six
 *	(led_cfg[0..5]), so mb[3..6] and their out-mb bits are added only
 *	for that type.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
5664 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5667 struct qla_hw_data *ha = vha->hw;
5669 mbx_cmd_t *mcp = &mc;
5671 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5672 return QLA_FUNCTION_FAILED;
5674 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5675 "Entered %s.\n", __func__);
5677 memset(mcp, 0, sizeof(mbx_cmd_t));
5678 mcp->mb[0] = MBC_SET_LED_CONFIG;
5679 mcp->mb[1] = led_cfg[0];
5680 mcp->mb[2] = led_cfg[1];
5681 if (IS_QLA8031(ha)) {
5682 mcp->mb[3] = led_cfg[2];
5683 mcp->mb[4] = led_cfg[3];
5684 mcp->mb[5] = led_cfg[4];
5685 mcp->mb[6] = led_cfg[5];
5688 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5690 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5695 rval = qla2x00_mailbox_command(vha, mcp);
5696 if (rval != QLA_SUCCESS) {
5697 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5698 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5701 "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config
 *	Read back the current LED configuration via MBC_GET_LED_CONFIG.
 *	Mirrors qla81xx_set_led_config(): two words on ISP81xx, six on
 *	ISP8031, copied from mb[1..6] into the caller's led_cfg array
 *	only on success.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
5708 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5711 struct qla_hw_data *ha = vha->hw;
5713 mbx_cmd_t *mcp = &mc;
5715 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5716 return QLA_FUNCTION_FAILED;
5718 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5719 "Entered %s.\n", __func__);
5721 memset(mcp, 0, sizeof(mbx_cmd_t));
5722 mcp->mb[0] = MBC_GET_LED_CONFIG;
5724 mcp->out_mb = MBX_0;
5725 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5727 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5731 rval = qla2x00_mailbox_command(vha, mcp);
5732 if (rval != QLA_SUCCESS) {
5733 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5734 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5736 led_cfg[0] = mcp->mb[1];
5737 led_cfg[1] = mcp->mb[2];
5738 if (IS_QLA8031(ha)) {
5739 led_cfg[2] = mcp->mb[3];
5740 led_cfg[3] = mcp->mb[4];
5741 led_cfg[4] = mcp->mb[5];
5742 led_cfg[5] = mcp->mb[6];
5744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5745 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl
 *	Turn the P3P (82xx/8044) beacon LED on or off using
 *	MBC_SET_LED_CONFIG.
 *	NOTE(review): the on/off value is loaded into mb[7] on lines not
 *	visible here (out_mb includes MBX_7); confirm encoding against the
 *	firmware spec.
 *
 * Input:
 *	enable = nonzero to start blinking, zero to stop.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
5752 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5755 struct qla_hw_data *ha = vha->hw;
5757 mbx_cmd_t *mcp = &mc;
5759 if (!IS_P3P_TYPE(ha))
5760 return QLA_FUNCTION_FAILED;
5762 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5763 "Entered %s.\n", __func__);
5765 memset(mcp, 0, sizeof(mbx_cmd_t));
5766 mcp->mb[0] = MBC_SET_LED_CONFIG;
5772 mcp->out_mb = MBX_7|MBX_0;
5774 mcp->tov = MBX_TOV_SECONDS;
5777 rval = qla2x00_mailbox_command(vha, mcp);
5778 if (rval != QLA_SUCCESS) {
5779 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5780 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5783 "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg
 *	Write a 32-bit value to a remote (firmware-side) register via
 *	MBC_WRITE_REMOTE_REG. Register address and data are each split
 *	LSW/MSW across two mailbox registers.
 *
 * Input:
 *	reg  = remote register address.
 *	data = 32-bit value to write.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
5790 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5793 struct qla_hw_data *ha = vha->hw;
5795 mbx_cmd_t *mcp = &mc;
5797 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5798 return QLA_FUNCTION_FAILED;
5800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5801 "Entered %s.\n", __func__);
5803 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5804 mcp->mb[1] = LSW(reg);
5805 mcp->mb[2] = MSW(reg);
5806 mcp->mb[3] = LSW(data);
5807 mcp->mb[4] = MSW(data);
5808 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5810 mcp->in_mb = MBX_1|MBX_0;
5811 mcp->tov = MBX_TOV_SECONDS;
5813 rval = qla2x00_mailbox_command(vha, mcp);
5815 if (rval != QLA_SUCCESS) {
5816 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5817 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5820 "Done %s.\n", __func__);
/*
 * qla2x00_port_logout
 *	Issue an implicit LOGO (MBC_PORT_LOGOUT) for the given remote port.
 *	Not supported on ISP2100/ISP2200 — those return early with failure.
 *
 * Input:
 *	fcport = remote port to log out (loop_id is what the fw keys on).
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
5827 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5830 struct qla_hw_data *ha = vha->hw;
5832 mbx_cmd_t *mcp = &mc;
5834 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5836 "Implicit LOGO Unsupported.\n");
5837 return QLA_FUNCTION_FAILED;
5841 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5842 "Entering %s.\n", __func__);
5844 /* Perform Implicit LOGO. */
5845 mcp->mb[0] = MBC_PORT_LOGOUT;
5846 mcp->mb[1] = fcport->loop_id;
/* NOTE(review): BIT_15 in mb[10] appears to select the implicit-LOGO
 * variant of the command — confirm with the firmware interface spec. */
5847 mcp->mb[10] = BIT_15;
5848 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5850 mcp->tov = MBX_TOV_SECONDS;
5852 rval = qla2x00_mailbox_command(vha, mcp);
5853 if (rval != QLA_SUCCESS)
5854 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5855 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5857 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5858 "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg
 *	Read a 32-bit remote register via MBC_READ_REMOTE_REG. The value
 *	comes back split across mb[3] (LSW) and mb[4] (MSW).
 *	CAMRAM reads taken during a soft-reset may transiently return the
 *	sentinel QLA8XXX_BAD_VALUE (0xbad0bad0); such reads are retried
 *	for up to 2 seconds before giving up.
 *
 * Output:
 *	*data = register contents on success.
 *
 * Returns:
 *	QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
5864 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5868 mbx_cmd_t *mcp = &mc;
5869 struct qla_hw_data *ha = vha->hw;
5870 unsigned long retry_max_time = jiffies + (2 * HZ);
5872 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5873 return QLA_FUNCTION_FAILED;
5875 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5878 mcp->mb[0] = MBC_READ_REMOTE_REG;
5879 mcp->mb[1] = LSW(reg);
5880 mcp->mb[2] = MSW(reg);
5881 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5882 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5883 mcp->tov = MBX_TOV_SECONDS;
5885 rval = qla2x00_mailbox_command(vha, mcp);
5887 if (rval != QLA_SUCCESS) {
5888 ql_dbg(ql_dbg_mbx, vha, 0x114c,
5889 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5890 rval, mcp->mb[0], mcp->mb[1]);
5892 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
5893 if (*data == QLA8XXX_BAD_VALUE) {
5895 * During soft-reset CAMRAM register reads might
5896 * return 0xbad0bad0. So retry for MAX of 2 sec
5897 * while reading camram registers.
5899 if (time_after(jiffies, retry_max_time)) {
5900 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5901 "Failure to read CAMRAM register. "
5902 "data=0x%x.\n", *data);
5903 return QLA_FUNCTION_FAILED;
5908 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware
 *	Ask the firmware to restart the embedded NIC core
 *	(MBC_RESTART_NIC_FIRMWARE, no parameters). On failure a firmware
 *	dump is captured for post-mortem analysis.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
5915 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5919 mbx_cmd_t *mcp = &mc;
5920 struct qla_hw_data *ha = vha->hw;
5922 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5923 return QLA_FUNCTION_FAILED;
5925 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5927 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5928 mcp->out_mb = MBX_0;
5929 mcp->in_mb = MBX_1|MBX_0;
5930 mcp->tov = MBX_TOV_SECONDS;
5932 rval = qla2x00_mailbox_command(vha, mcp);
5934 if (rval != QLA_SUCCESS) {
5935 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5936 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5937 rval, mcp->mb[0], mcp->mb[1]);
/* Capture a firmware dump to aid debugging the failed restart. */
5938 ha->isp_ops->fw_dump(vha, 0);
5940 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control
 *	Issue MBC_SET_ACCESS_CONTROL on ISP8031. The low byte of
 *	'options' is a subcode whose bits select the sub-operation:
 *	  BIT_2       - pass a start/end address range in mb[2..5];
 *	  BIT_5       - query sector size (returned in mb[1]);
 *	  BIT_6|BIT_7 - driver-lock ops (lock id echoed in mb[3..4]);
 *	  BIT_3|BIT_4 - flash-lock ops  (lock id echoed in mb[3..4]).
 *	On failure a firmware dump is captured.
 *
 * Output:
 *	*sector_size = sector size when the BIT_5 subcode was requested.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
5947 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5948 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5952 mbx_cmd_t *mcp = &mc;
5953 uint8_t subcode = (uint8_t)options;
5954 struct qla_hw_data *ha = vha->hw;
5956 if (!IS_QLA8031(ha))
5957 return QLA_FUNCTION_FAILED;
5959 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5961 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5962 mcp->mb[1] = options;
5963 mcp->out_mb = MBX_1|MBX_0;
5964 if (subcode & BIT_2) {
5965 mcp->mb[2] = LSW(start_addr);
5966 mcp->mb[3] = MSW(start_addr);
5967 mcp->mb[4] = LSW(end_addr);
5968 mcp->mb[5] = MSW(end_addr);
5969 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5971 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5972 if (!(subcode & (BIT_2 | BIT_5)))
5973 mcp->in_mb |= MBX_4|MBX_3;
5974 mcp->tov = MBX_TOV_SECONDS;
5976 rval = qla2x00_mailbox_command(vha, mcp);
5978 if (rval != QLA_SUCCESS) {
5979 ql_dbg(ql_dbg_mbx, vha, 0x1147,
5980 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5981 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5983 ha->isp_ops->fw_dump(vha, 0);
5985 if (subcode & BIT_5)
5986 *sector_size = mcp->mb[1];
5987 else if (subcode & (BIT_6 | BIT_7)) {
5988 ql_dbg(ql_dbg_mbx, vha, 0x1148,
5989 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5990 } else if (subcode & (BIT_3 | BIT_4)) {
5991 ql_dbg(ql_dbg_mbx, vha, 0x1149,
5992 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5994 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6001 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6006 mbx_cmd_t *mcp = &mc;
6008 if (!IS_MCTP_CAPABLE(vha->hw))
6009 return QLA_FUNCTION_FAILED;
6011 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6012 "Entered %s.\n", __func__);
6014 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6015 mcp->mb[1] = LSW(addr);
6016 mcp->mb[2] = MSW(req_dma);
6017 mcp->mb[3] = LSW(req_dma);
6018 mcp->mb[4] = MSW(size);
6019 mcp->mb[5] = LSW(size);
6020 mcp->mb[6] = MSW(MSD(req_dma));
6021 mcp->mb[7] = LSW(MSD(req_dma));
6022 mcp->mb[8] = MSW(addr);
6023 /* Setting RAM ID to valid */
6024 mcp->mb[10] |= BIT_7;
6025 /* For MCTP RAM ID is 0x40 */
6026 mcp->mb[10] |= 0x40;
6028 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6032 mcp->tov = MBX_TOV_SECONDS;
6034 rval = qla2x00_mailbox_command(vha, mcp);
6036 if (rval != QLA_SUCCESS) {
6037 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6038 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6040 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6041 "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics
 *	Run the D-port (diagnostic port) test via MBC_DPORT_DIAGNOSTICS
 *	and let firmware DMA the result block into dd_buf. The buffer is
 *	streaming-mapped here and unmapped before return, so the caller
 *	passes a plain kernel buffer.
 *	NOTE(review): dd_buf is memset AFTER being mapped DMA_FROM_DEVICE;
 *	the DMA API expects CPU writes before mapping (or under a sync) —
 *	confirm this ordering is intentional.
 *
 * Returns:
 *	QLA_SUCCESS, QLA_MEMORY_ALLOC_FAILED (mapping failure), or the
 *	mailbox failure status.
 */
6048 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6049 void *dd_buf, uint size, uint options)
6053 mbx_cmd_t *mcp = &mc;
6056 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
6057 return QLA_FUNCTION_FAILED;
6059 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6060 "Entered %s.\n", __func__);
6062 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6063 dd_buf, size, DMA_FROM_DEVICE);
6064 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6065 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6066 return QLA_MEMORY_ALLOC_FAILED;
6069 memset(dd_buf, 0, size);
6071 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6072 mcp->mb[1] = options;
/* 64-bit result-buffer address split across mb[2,3] and mb[6,7]. */
6073 mcp->mb[2] = MSW(LSD(dd_dma));
6074 mcp->mb[3] = LSW(LSD(dd_dma));
6075 mcp->mb[6] = MSW(MSD(dd_dma));
6076 mcp->mb[7] = LSW(MSD(dd_dma));
6078 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6079 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6080 mcp->buf_size = size;
6081 mcp->flags = MBX_DMA_IN;
/* The diagnostic can take a while; allow 4x the normal mbx timeout. */
6082 mcp->tov = MBX_TOV_SECONDS * 4;
6083 rval = qla2x00_mailbox_command(vha, mcp);
6085 if (rval != QLA_SUCCESS) {
6086 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6088 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6089 "Done %s.\n", __func__);
6092 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6093 size, DMA_FROM_DEVICE);
/*
 * qla2x00_async_mb_sp_done - completion callback for SRB_MB_IOCB.
 *	Records the IOCB result and wakes the submitter blocked in
 *	qla24xx_send_mb_cmd(); the srb itself is freed by that caller.
 */
6098 static void qla2x00_async_mb_sp_done(void *s, int res)
6102 sp->u.iocb_cmd.u.mbx.rc = res;
6104 complete(&sp->u.iocb_cmd.u.mbx.comp);
6105 /* don't free sp here. Let the caller do the free */
6109 * This mailbox uses the iocb interface to send MB command.
6110 * This allows non-critical (non chip setup) command to go
/*
 * qla24xx_send_mb_cmd
 *	Submit a mailbox command through the IOCB (MBX IOCB) path instead
 *	of the direct mailbox registers, then sleep on a completion until
 *	qla2x00_async_mb_sp_done() fires or the IOCB times out. Request
 *	registers are taken from mcp->mb and the response registers are
 *	copied back into the same array.
 *	Must not be called before firmware is started; sleeps, so no
 *	atomic context.
 *
 * Returns:
 *	QLA_SUCCESS, QLA_FUNCTION_TIMEOUT, or QLA_FUNCTION_FAILED.
 */
6113 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6115 int rval = QLA_FUNCTION_FAILED;
6119 if (!vha->hw->flags.fw_started)
6122 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6126 sp->type = SRB_MB_IOCB;
/* Human-readable command name for the logs (see mb_to_str()). */
6127 sp->name = mb_to_str(mcp->mb[0]);
6129 c = &sp->u.iocb_cmd;
6130 c->timeout = qla2x00_async_iocb_timeout;
6131 init_completion(&c->u.mbx.comp);
6133 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6135 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6137 sp->done = qla2x00_async_mb_sp_done;
6139 rval = qla2x00_start_sp(sp);
6140 if (rval != QLA_SUCCESS) {
6141 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6142 "%s: %s Failed submission. %x.\n",
6143 __func__, sp->name, rval);
6147 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6148 sp->name, sp->handle);
6150 wait_for_completion(&c->u.mbx.comp);
/* Copy the firmware's response registers back to the caller. */
6151 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6155 case QLA_FUNCTION_TIMEOUT:
6156 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6157 __func__, sp->name, rval);
6160 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6161 __func__, sp->name);
6165 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6166 __func__, sp->name, rval);
6181 * NOTE: Do not call this routine from DPC thread
/*
 * qla24xx_gpdb_wait
 *	Synchronously fetch the firmware port database entry for fcport
 *	via the MBX-IOCB path (qla24xx_send_mb_cmd) and parse it with
 *	__qla24xx_parse_gpdb(). Sleeps — must not run on the DPC thread
 *	(see the NOTE above) and requires firmware to be started.
 *	NOTE(review): mb[] is a uint16_t array, yet loop_id/vp_idx/opt are
 *	stored with cpu_to_le16() — on big-endian this byte-swaps values
 *	the IOCB builder may expect in CPU order; confirm intent.
 *
 * Returns:
 *	QLA_SUCCESS, or an error from the mailbox/parse path.
 */
6183 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6185 int rval = QLA_FUNCTION_FAILED;
6187 struct port_database_24xx *pd;
6188 struct qla_hw_data *ha = vha->hw;
6191 if (!vha->hw->flags.fw_started)
6194 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6196 ql_log(ql_log_warn, vha, 0xd047,
6197 "Failed to allocate port database structure.\n");
6201 memset(&mc, 0, sizeof(mc));
6202 mc.mb[0] = MBC_GET_PORT_DATABASE;
6203 mc.mb[1] = cpu_to_le16(fcport->loop_id);
/* 64-bit DMA address of the pd buffer in mb[2,3] / mb[6,7]. */
6204 mc.mb[2] = MSW(pd_dma);
6205 mc.mb[3] = LSW(pd_dma);
6206 mc.mb[6] = MSW(MSD(pd_dma));
6207 mc.mb[7] = LSW(MSD(pd_dma));
6208 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6209 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6211 rval = qla24xx_send_mb_cmd(vha, &mc);
6212 if (rval != QLA_SUCCESS) {
6213 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6214 "%s: %8phC fail\n", __func__, fcport->port_name);
6218 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6220 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6221 __func__, fcport->port_name);
6225 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb
 *	Validate and unpack a 24xx-format port database entry into fcport:
 *	login state, WWNs, port id, port type (initiator/target/NVMe),
 *	class-of-service, and confirmed-completion capability.
 *	For NVMe ports the FC-NVMe login state lives in the high nibble of
 *	the state bytes; FCP uses the low nibble.
 *
 * Returns:
 *	QLA_SUCCESS, QLA_FUNCTION_FAILED (not in PRLI-complete state), or
 *	QLA_NOT_LOGGED_IN (device changed/disappeared mid-way).
 */
6230 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6231 struct port_database_24xx *pd)
6233 int rval = QLA_SUCCESS;
6235 u8 current_login_state, last_login_state;
6237 if (fcport->fc4f_nvme) {
6238 current_login_state = pd->current_login_state >> 4;
6239 last_login_state = pd->last_login_state >> 4;
6241 current_login_state = pd->current_login_state & 0xf;
6242 last_login_state = pd->last_login_state & 0xf;
6245 /* Check for logged in state. */
6246 if (current_login_state != PDS_PRLI_COMPLETE) {
6247 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6248 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6249 current_login_state, last_login_state, fcport->loop_id);
6250 rval = QLA_FUNCTION_FAILED;
/* A known WWPN that no longer matches the database means the device
 * behind this loop_id changed while we were talking to it. */
6254 if (fcport->loop_id == FC_NO_LOOP_ID ||
6255 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6256 memcmp(fcport->port_name, pd->port_name, 8))) {
6257 /* We lost the device mid way. */
6258 rval = QLA_NOT_LOGGED_IN;
6262 /* Names are little-endian. */
6263 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6264 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6266 /* Get port_id of device. */
6267 fcport->d_id.b.domain = pd->port_id[0];
6268 fcport->d_id.b.area = pd->port_id[1];
6269 fcport->d_id.b.al_pa = pd->port_id[2];
6270 fcport->d_id.b.rsvd_1 = 0;
6272 if (fcport->fc4f_nvme) {
6273 fcport->nvme_prli_service_param =
6274 pd->prli_nvme_svc_param_word_3;
6275 fcport->port_type = FCT_NVME;
6277 /* If not target must be initiator or unknown type. */
6278 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6279 fcport->port_type = FCT_INITIATOR;
6281 fcport->port_type = FCT_TARGET;
6283 /* Passback COS information. */
6284 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6285 FC_COS_CLASS2 : FC_COS_CLASS3;
6287 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6288 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6289 fcport->conf_compl_supported = 1;
6297 * qla24xx_gidlist_wait
6298 * NOTE: don't call this routine from DPC thread.
/*
 * qla24xx_gidlist_wait
 *	Synchronously retrieve the firmware's loop-ID list
 *	(MBC_GET_ID_LIST) into the caller-mapped id_list buffer via the
 *	MBX-IOCB path. Sleeps; requires firmware started; must not be
 *	called from the DPC thread (see NOTE above).
 *
 * Output:
 *	*entries = number of entries returned (firmware's mb[1]).
 *
 * Returns:
 *	QLA_SUCCESS or an error from qla24xx_send_mb_cmd().
 */
6300 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6301 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6303 int rval = QLA_FUNCTION_FAILED;
6306 if (!vha->hw->flags.fw_started)
6309 memset(&mc, 0, sizeof(mc));
6310 mc.mb[0] = MBC_GET_ID_LIST;
/* 64-bit DMA address of the list buffer in mb[2,3] / mb[6,7]. */
6311 mc.mb[2] = MSW(id_list_dma);
6312 mc.mb[3] = LSW(id_list_dma);
6313 mc.mb[6] = MSW(MSD(id_list_dma));
6314 mc.mb[7] = LSW(MSD(id_list_dma));
6316 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6318 rval = qla24xx_send_mb_cmd(vha, &mc);
6319 if (rval != QLA_SUCCESS) {
6320 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6321 "%s: fail\n", __func__);
6323 *entries = mc.mb[1];
6324 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6325 "%s: done\n", __func__);
/*
 * qla27xx_set_zio_threshold
 *	Program the ZIO (zero-interrupt-operation) threshold in firmware
 *	via MBC_GET_SET_ZIO_THRESHOLD. mb[1]=1 selects the "set"
 *	sub-operation; mb[2] carries the new threshold value.
 *	NOTE(review): mb[] is uint16_t yet values are stored with
 *	cpu_to_le16() — compare with qla27xx_get_zio_threshold below;
 *	confirm the intended endianness handling.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
6331 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6335 mbx_cmd_t *mcp = &mc;
6337 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6338 "Entered %s\n", __func__);
6340 memset(mcp->mb, 0 , sizeof(mcp->mb));
6341 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6342 mcp->mb[1] = cpu_to_le16(1);
6343 mcp->mb[2] = cpu_to_le16(value);
6344 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6345 mcp->in_mb = MBX_2 | MBX_0;
6346 mcp->tov = MBX_TOV_SECONDS;
6349 rval = qla2x00_mailbox_command(vha, mcp);
6351 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6352 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla27xx_get_zio_threshold
 *	Read the current ZIO threshold from firmware via
 *	MBC_GET_SET_ZIO_THRESHOLD. mb[1]=0 selects the "get"
 *	sub-operation; on success the threshold is returned in mb[2] and
 *	stored through *value on a line not visible in this view.
 *
 * Returns:
 *	QLA_SUCCESS or the mailbox failure status.
 */
6357 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6361 mbx_cmd_t *mcp = &mc;
6363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6364 "Entered %s\n", __func__);
6366 memset(mcp->mb, 0, sizeof(mcp->mb));
6367 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6368 mcp->mb[1] = cpu_to_le16(0);
6369 mcp->out_mb = MBX_1 | MBX_0;
6370 mcp->in_mb = MBX_2 | MBX_0;
6371 mcp->tov = MBX_TOV_SECONDS;
6374 rval = qla2x00_mailbox_command(vha, mcp);
6375 if (rval == QLA_SUCCESS)
6378 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6379 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla2x00_read_sfp_dev
 *	Read the full SFP transceiver EEPROM (SFP_DEV_SIZE bytes) in
 *	SFP_BLOCK_SIZE chunks into ha->sfp_data, optionally copying up to
 *	'count' bytes into the caller's buf as chunks arrive.
 *	NOTE(review): 'addr' is advanced to the next I2C device address on
 *	a line not visible here (see "Skip to next device address.");
 *	presumably the A0h/A2h page split — confirm.
 *
 * Input:
 *	buf   = optional destination; may be NULL to just refresh sfp_data.
 *	count = max bytes to copy into buf.
 *
 * Returns:
 *	QLA_SUCCESS, or the first qla2x00_read_sfp() failure status.
 */
6385 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6387 struct qla_hw_data *ha = vha->hw;
6388 uint16_t iter, addr, offset;
6389 dma_addr_t phys_addr;
6393 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6395 phys_addr = ha->sfp_data_dma;
6396 sfp_data = ha->sfp_data;
6399 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6401 /* Skip to next device address. */
6406 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6407 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6408 if (rval != QLA_SUCCESS) {
6409 ql_log(ql_log_warn, vha, 0x706d,
6410 "Unable to read SFP data (%x/%x/%x).\n", rval,
/* Copy only as much of this chunk as still fits in the caller's buf. */
6416 if (buf && (c < count)) {
6419 if ((count - c) >= SFP_BLOCK_SIZE)
6420 sz = SFP_BLOCK_SIZE;
6424 memcpy(buf, sfp_data, sz);
6425 buf += SFP_BLOCK_SIZE;
6428 phys_addr += SFP_BLOCK_SIZE;
6429 sfp_data += SFP_BLOCK_SIZE;
6430 offset += SFP_BLOCK_SIZE;
6436 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6437 uint16_t *out_mb, int out_mb_sz)
6439 int rval = QLA_FUNCTION_FAILED;
6442 if (!vha->hw->flags.fw_started)
6445 memset(&mc, 0, sizeof(mc));
6446 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6448 rval = qla24xx_send_mb_cmd(vha, &mc);
6449 if (rval != QLA_SUCCESS) {
6450 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6451 "%s: fail\n", __func__);
6453 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6454 memcpy(out_mb, mc.mb, out_mb_sz);
6456 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6458 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6459 "%s: done\n", __func__);