/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs = 1;

	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return iocbs;
}
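/*
 * Worked example (illustrative, not from the original source): a
 * 17-segment transfer fits 3 DSDs in the Command Type 2 IOCB and the
 * remaining 14 in (17 - 3) / 7 = 2 Continuation Type 0 IOCBs, i.e.
 * 3 entries total; a non-zero remainder would add one more
 * continuation entry.
 */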
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs = 1;

	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}
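/*
 * Worked example (illustrative): a 12-segment transfer fits 2 DSDs in
 * the Command Type 3 IOCB and the remaining 10 in (12 - 2) / 5 = 2
 * Continuation Type 1 IOCBs, i.e. 3 entries total.
 */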
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
	    CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
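/*
 * Illustrative example: a SCSI_PROT_WRITE_PASS command on a host whose
 * guard type includes SHOST_DIX_GUARD_IP sets PO_MODE_DIF_TCP_CKSUM,
 * while the non-zero return value (the protection sg count) tells
 * callers such as qla24xx_dif_start_scsi() to DMA-map
 * scsi_prot_sglist(cmd) as well.
 */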
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
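/*
 * Note (illustrative): each 64-bit DSD is emitted as three 32-bit
 * words, the DMA address split via LSD()/MSD() followed by the segment
 * length; a Continuation Type 1 IOCB has room for five such
 * descriptors.
 */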
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @qpair: queue pair pointer
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}
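/*
 * Illustrative usage (mirrors the start_scsi callers in this file):
 * after a reset, I/O paths first synchronize the firmware with a
 * marker before queuing commands, e.g.
 *
 *	if (vha->marker_needed != 0) {
 *		if (qla2x00_marker(vha, ha->base_qpair, 0, 0,
 *		    MK_SYNC_ALL) != QLA_SUCCESS)
 *			return QLA_FUNCTION_FAILED;
 *		vha->marker_needed = 0;
 *	}
 */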
/*
 * qla2x00_issue_marker
 *
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
static void
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
}
/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
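/*
 * Worked example (illustrative, with QLA_DSDS_PER_IOCB assumed to be
 * 37 as defined in qla_def.h): 80 descriptors need 80 / 37 = 2 full
 * DSD lists plus one more for the remaining 6, i.e. 3 lists.
 */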
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};
/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if HBA tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag must
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
static inline int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
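/*
 * Illustrative walk-through: with blk_sz = 512 and two 256-byte SG
 * entries, the first call returns dma_len = 256 with *partial = 1
 * (block incomplete) and advances to the next SG entry; the second
 * call returns dma_len = 256 with *partial = 0, completing one
 * 512-byte protection interval split across both entries.
 */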
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
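/*
 * Layout note (illustrative): each DSD is three 32-bit words (address
 * low, address high, length), i.e. 12 bytes, and dsd_list_len reserves
 * one extra slot via (avail_dsds + 1) * 12 so that the final slot of a
 * full list can point at the next chained DSD list.
 */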
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
		    __func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_addr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_sg_len = 0;
					ldma_needed = 1;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
			    DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				*cur_dsd++ =
				    cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
				*cur_dsd++ =
				    cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
				*cur_dsd++ = dsd_list_len;
				cur_dsd = dsd_ptr->dsd_addr;
			}
			*cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(sglen);
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			dma_addr_t sle_dma;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				*cur_dsd++ =
				    cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
				*cur_dsd++ =
				    cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
				*cur_dsd++ = dsd_list_len;
				cur_dsd = dsd_ptr->dsd_addr;
			}
			sle_dma = sg_dma_address(sg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
			avail_dsds--;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t	*cur_dsd, *fcp_dl;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	total_bytes = 0;
	uint32_t	data_bytes;
	uint32_t	dif_bytes;
	uint8_t		bundling = 1;
	uint16_t	blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t		additional_fcpcdb_len;
	uint16_t	fcp_cmnd_len;
	struct fcp_cmnd	*fcp_cmnd;
	dma_addr_t	crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}
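	/*
	 * Worked example (illustrative): a 32768-byte WRITE_PASS with
	 * 512-byte sectors covers 64 blocks, so dif_bytes = 64 * 8 = 512
	 * and total_bytes = 32768 + 512 = 33280; for WRITE_STRIP the wire
	 * count total_bytes stays 32768 while data_bytes grows by the 512
	 * protection bytes.
	 */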
	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;
	struct qla_qpair	*qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_FUNCTION_FAILED;
}
/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */

void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (sp && (sp->type != SRB_SCSI_CMD)) {
		/* Adjust entry-counts as needed. */
		req_cnt = sp->iocbs;
	}

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (qpair->use_shadow_reg)
			cnt = *req->out_ptr;
		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	if (sp) {
		/* Check for room in outstanding command list. */
		handle = req->current_outstanding_cmd;
		for (index = 1; index < req->num_outstanding_cmds; index++) {
			handle++;
			if (handle == req->num_outstanding_cmds)
				handle = 1;
			if (!req->outstanding_cmds[handle])
				break;
		}
		if (index == req->num_outstanding_cmds) {
			ql_log(ql_log_warn, vha, 0x700b,
			    "No room on outstanding cmd array.\n");
			goto queuing_error;
		}

		/* Prep command array. */
		req->current_outstanding_cmd = handle;
		req->outstanding_cmds[handle] = sp;
		sp->handle = handle;
	}

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}
2408 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2410 struct srb_iocb *lio = &sp->u.iocb_cmd;
2412 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2413 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2414 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2415 logio->control_flags |= LCF_NVME_PRLI;
2416 if (sp->vha->flags.nvme_first_burst)
2417 logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2418 }
2420 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2421 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2422 logio->port_id[1] = sp->fcport->d_id.b.area;
2423 logio->port_id[2] = sp->fcport->d_id.b.domain;
2424 logio->vp_index = sp->vha->vp_idx;
2425 }
2427 static void
2428 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2429 {
2430 struct srb_iocb *lio = &sp->u.iocb_cmd;
2432 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2433 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2434 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2435 } else {
2436 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2437 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2438 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2439 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2440 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2441 }
2442 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2443 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2444 logio->port_id[1] = sp->fcport->d_id.b.area;
2445 logio->port_id[2] = sp->fcport->d_id.b.domain;
2446 logio->vp_index = sp->vha->vp_idx;
2447 }
2449 static void
2450 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2451 {
2452 struct qla_hw_data *ha = sp->vha->hw;
2453 struct srb_iocb *lio = &sp->u.iocb_cmd;
2454 uint16_t opts;
2456 mbx->entry_type = MBX_IOCB_TYPE;
2457 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2458 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2459 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2460 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2461 if (HAS_EXTENDED_IDS(ha)) {
2462 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2463 mbx->mb10 = cpu_to_le16(opts);
2464 } else {
2465 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2466 }
2467 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2468 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2469 sp->fcport->d_id.b.al_pa);
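/*
 * On adapters without extended loop IDs, the 8-bit loop_id shares mb1
 * with the option bits (loop_id << 8 | opts); with extended IDs the
 * 16-bit loop_id occupies mb1 and the options move to mb10.
 */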
2470 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2471 }
2473 static void
2474 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2475 {
2476 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2477 logio->control_flags =
2478 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2479 if (!sp->fcport->keep_nport_handle)
2480 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2481 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2482 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2483 logio->port_id[1] = sp->fcport->d_id.b.area;
2484 logio->port_id[2] = sp->fcport->d_id.b.domain;
2485 logio->vp_index = sp->vha->vp_idx;
2486 }
2488 static void
2489 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2490 {
2491 struct qla_hw_data *ha = sp->vha->hw;
2493 mbx->entry_type = MBX_IOCB_TYPE;
2494 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2495 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2496 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2497 cpu_to_le16(sp->fcport->loop_id) :
2498 cpu_to_le16(sp->fcport->loop_id << 8);
2499 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2500 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2501 sp->fcport->d_id.b.al_pa);
2502 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2503 /* Implicit: mbx->mbx10 = 0. */
2504 }
2506 static void
2507 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2508 {
2509 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2510 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2511 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2512 logio->vp_index = sp->vha->vp_idx;
2513 }
2515 static void
2516 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2517 {
2518 struct qla_hw_data *ha = sp->vha->hw;
2520 mbx->entry_type = MBX_IOCB_TYPE;
2521 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2522 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2523 if (HAS_EXTENDED_IDS(ha)) {
2524 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2525 mbx->mb10 = cpu_to_le16(BIT_0);
2526 } else {
2527 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2528 }
2529 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2530 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2531 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2532 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2533 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2534 }
2536 static void
2537 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2538 {
2539 uint32_t flags;
2540 uint64_t lun;
2541 struct fc_port *fcport = sp->fcport;
2542 scsi_qla_host_t *vha = fcport->vha;
2543 struct qla_hw_data *ha = vha->hw;
2544 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2545 struct req_que *req = vha->req;
2547 flags = iocb->u.tmf.flags;
2548 lun = iocb->u.tmf.lun;
2550 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2551 tsk->entry_count = 1;
2552 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2553 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2554 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
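/*
 * r_a_tov appears to be kept in 100 ms units here, so r_a_tov / 10 * 2
 * programs a task-management timeout of twice R_A_TOV in seconds.
 */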
2555 tsk->control_flags = cpu_to_le32(flags);
2556 tsk->port_id[0] = fcport->d_id.b.al_pa;
2557 tsk->port_id[1] = fcport->d_id.b.area;
2558 tsk->port_id[2] = fcport->d_id.b.domain;
2559 tsk->vp_index = fcport->vha->vp_idx;
2561 if (flags == TCF_LUN_RESET) {
2562 int_to_scsilun(lun, &tsk->lun);
2563 host_to_fcp_swap((uint8_t *)&tsk->lun,
2564 sizeof(tsk->lun));
2565 }
2566 }
2568 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2569 {
2570 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2571 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2572 sp->free = qla2x00_sp_free;
2573 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2574 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2575 add_timer(&sp->u.iocb_cmd.timer);
2576 }
2578 static void
2579 qla2x00_els_dcmd_sp_free(void *data)
2580 {
2581 srb_t *sp = data;
2582 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2584 kfree(sp->fcport);
2586 if (elsio->u.els_logo.els_logo_pyld)
2587 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2588 elsio->u.els_logo.els_logo_pyld,
2589 elsio->u.els_logo.els_logo_pyld_dma);
2591 del_timer(&elsio->timer);
2592 qla2x00_rel_sp(sp);
2593 }
2595 static void
2596 qla2x00_els_dcmd_iocb_timeout(void *data)
2597 {
2598 srb_t *sp = data;
2599 fc_port_t *fcport = sp->fcport;
2600 struct scsi_qla_host *vha = sp->vha;
2601 struct srb_iocb *lio = &sp->u.iocb_cmd;
2603 ql_dbg(ql_dbg_io, vha, 0x3069,
2604 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2605 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2606 fcport->d_id.b.al_pa);
2608 complete(&lio->u.els_logo.comp);
2609 }
2611 static void
2612 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2613 {
2614 srb_t *sp = ptr;
2615 fc_port_t *fcport = sp->fcport;
2616 struct srb_iocb *lio = &sp->u.iocb_cmd;
2617 struct scsi_qla_host *vha = sp->vha;
2619 ql_dbg(ql_dbg_io, vha, 0x3072,
2620 "%s hdl=%x, portid=%02x%02x%02x done\n",
2621 sp->name, sp->handle, fcport->d_id.b.domain,
2622 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2624 complete(&lio->u.els_logo.comp);
2625 }
2627 int
2628 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2629 port_id_t remote_did)
2630 {
2631 srb_t *sp;
2632 fc_port_t *fcport = NULL;
2633 struct srb_iocb *elsio = NULL;
2634 struct qla_hw_data *ha = vha->hw;
2635 struct els_logo_payload logo_pyld;
2636 int rval = QLA_SUCCESS;
2638 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2639 if (!fcport) {
2640 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2641 return -ENOMEM;
2642 }
2644 /* Alloc SRB structure */
2645 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2646 if (!sp) {
2647 kfree(fcport);
2648 ql_log(ql_log_info, vha, 0x70e6,
2649 "SRB allocation failed\n");
2650 return -ENOMEM;
2651 }
2653 elsio = &sp->u.iocb_cmd;
2654 fcport->loop_id = 0xFFFF;
2655 fcport->d_id.b.domain = remote_did.b.domain;
2656 fcport->d_id.b.area = remote_did.b.area;
2657 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2659 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2660 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2662 sp->type = SRB_ELS_DCMD;
2663 sp->name = "ELS_DCMD";
2664 sp->fcport = fcport;
2665 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2666 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2667 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2668 sp->done = qla2x00_els_dcmd_sp_done;
2669 sp->free = qla2x00_els_dcmd_sp_free;
2671 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2672 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2673 GFP_KERNEL);
2675 if (!elsio->u.els_logo.els_logo_pyld) {
2676 sp->free(sp);
2677 return QLA_FUNCTION_FAILED;
2678 }
2680 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2682 elsio->u.els_logo.els_cmd = els_opcode;
2683 logo_pyld.opcode = els_opcode;
2684 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2685 logo_pyld.s_id[1] = vha->d_id.b.area;
2686 logo_pyld.s_id[2] = vha->d_id.b.domain;
2687 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
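/*
 * The S_ID bytes are loaded al_pa/area/domain and host_to_fcp_swap()
 * then reorders them into the big-endian domain/area/al_pa order that
 * goes on the wire.
 */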
2688 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2690 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2691 sizeof(struct els_logo_payload));
2693 rval = qla2x00_start_sp(sp);
2694 if (rval != QLA_SUCCESS) {
2695 sp->free(sp);
2696 return QLA_FUNCTION_FAILED;
2697 }
2699 ql_dbg(ql_dbg_io, vha, 0x3074,
2700 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2701 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2702 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2704 wait_for_completion(&elsio->u.els_logo.comp);
2706 sp->free(sp);
2707 return rval;
2708 }
2710 static void
2711 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2712 {
2713 scsi_qla_host_t *vha = sp->vha;
2714 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2716 els_iocb->entry_type = ELS_IOCB_TYPE;
2717 els_iocb->entry_count = 1;
2718 els_iocb->sys_define = 0;
2719 els_iocb->entry_status = 0;
2720 els_iocb->handle = sp->handle;
2721 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2722 els_iocb->tx_dsd_count = 1;
2723 els_iocb->vp_index = vha->vp_idx;
2724 els_iocb->sof_type = EST_SOFI3;
2725 els_iocb->rx_dsd_count = 0;
2726 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2728 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2729 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2730 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2731 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2732 els_iocb->s_id[1] = vha->d_id.b.area;
2733 els_iocb->s_id[2] = vha->d_id.b.domain;
2734 els_iocb->control_flags = 0;
2736 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2737 els_iocb->tx_byte_count = els_iocb->tx_len =
2738 sizeof(struct els_plogi_payload);
2739 els_iocb->tx_address[0] =
2740 cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2741 els_iocb->tx_address[1] =
2742 cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2744 els_iocb->rx_dsd_count = 1;
2745 els_iocb->rx_byte_count = els_iocb->rx_len =
2746 sizeof(struct els_plogi_payload);
2747 els_iocb->rx_address[0] =
2748 cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2749 els_iocb->rx_address[1] =
2750 cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
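/*
 * LSD()/MSD() split the 64-bit DMA address of each payload buffer into
 * low and high 32-bit halves, each stored little-endian, which is how
 * this IOCB layout expresses a data segment descriptor.
 */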
2752 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2753 "PLOGI ELS IOCB:\n");
2754 ql_dump_buffer(ql_log_info, vha, 0x0109,
2755 (uint8_t *)els_iocb, 0x70);
2756 } else {
2757 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2758 els_iocb->tx_address[0] =
2759 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2760 els_iocb->tx_address[1] =
2761 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2762 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2764 els_iocb->rx_byte_count = 0;
2765 els_iocb->rx_address[0] = 0;
2766 els_iocb->rx_address[1] = 0;
2767 els_iocb->rx_len = 0;
2768 }
2770 sp->vha->qla_stats.control_requests++;
2771 }
2773 static void
2774 qla2x00_els_dcmd2_iocb_timeout(void *data)
2775 {
2776 srb_t *sp = data;
2777 fc_port_t *fcport = sp->fcport;
2778 struct scsi_qla_host *vha = sp->vha;
2779 struct qla_hw_data *ha = vha->hw;
2780 unsigned long flags = 0;
2781 int res;
2783 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2784 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2785 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2787 /* Abort the exchange */
2788 spin_lock_irqsave(&ha->hardware_lock, flags);
2789 res = ha->isp_ops->abort_command(sp);
2790 ql_dbg(ql_dbg_io, vha, 0x3070,
2791 "mbx abort_command %s\n",
2792 (res == QLA_SUCCESS) ? "successful" : "failed");
2793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2795 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2796 }
2798 static void
2799 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2800 {
2801 srb_t *sp = ptr;
2802 fc_port_t *fcport = sp->fcport;
2803 struct srb_iocb *lio = &sp->u.iocb_cmd;
2804 struct scsi_qla_host *vha = sp->vha;
2805 struct event_arg ea;
2806 struct qla_work_evt *e;
2808 ql_dbg(ql_dbg_disc, vha, 0x3072,
2809 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2810 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2812 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2813 del_timer(&sp->u.iocb_cmd.timer);
2815 if (sp->flags & SRB_WAKEUP_ON_COMP)
2816 complete(&lio->u.els_plogi.comp);
2817 else {
2818 if (res) {
2819 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2820 } else {
2821 memset(&ea, 0, sizeof(ea));
2822 ea.fcport = fcport;
2823 ea.rc = res;
2824 ea.event = FCME_ELS_PLOGI_DONE;
2825 qla2x00_fcport_event_handler(vha, &ea);
2826 }
2828 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2829 if (!e) {
2830 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2832 if (elsio->u.els_plogi.els_plogi_pyld)
2833 dma_free_coherent(&sp->vha->hw->pdev->dev,
2834 elsio->u.els_plogi.tx_size,
2835 elsio->u.els_plogi.els_plogi_pyld,
2836 elsio->u.els_plogi.els_plogi_pyld_dma);
2838 if (elsio->u.els_plogi.els_resp_pyld)
2839 dma_free_coherent(&sp->vha->hw->pdev->dev,
2840 elsio->u.els_plogi.rx_size,
2841 elsio->u.els_plogi.els_resp_pyld,
2842 elsio->u.els_plogi.els_resp_pyld_dma);
2843 sp->free(sp);
2844 return;
2845 }
2846 e->u.iosb.sp = sp;
2847 qla2x00_post_work(vha, e);
2848 }
2849 }
2851 int
2852 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2853 fc_port_t *fcport, bool wait)
2854 {
2855 srb_t *sp;
2856 struct srb_iocb *elsio = NULL;
2857 struct qla_hw_data *ha = vha->hw;
2858 int rval = QLA_SUCCESS;
2859 void *ptr, *resp_ptr;
2861 /* Alloc SRB structure */
2862 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2863 if (!sp) {
2864 ql_log(ql_log_info, vha, 0x70e6,
2865 "SRB allocation failed\n");
2866 return -ENOMEM;
2867 }
2869 elsio = &sp->u.iocb_cmd;
2870 ql_dbg(ql_dbg_io, vha, 0x3073,
2871 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2873 fcport->flags |= FCF_ASYNC_SENT;
2874 sp->type = SRB_ELS_DCMD;
2875 sp->name = "ELS_DCMD";
2876 sp->fcport = fcport;
2878 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2879 init_completion(&elsio->u.els_plogi.comp);
2880 if (wait)
2881 sp->flags = SRB_WAKEUP_ON_COMP;
2883 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2885 sp->done = qla2x00_els_dcmd2_sp_done;
2886 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2888 ptr = elsio->u.els_plogi.els_plogi_pyld =
2889 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2890 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2892 if (!elsio->u.els_plogi.els_plogi_pyld) {
2893 rval = QLA_FUNCTION_FAILED;
2894 goto out;
2895 }
2897 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2898 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2899 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2901 if (!elsio->u.els_plogi.els_resp_pyld) {
2902 rval = QLA_FUNCTION_FAILED;
2903 goto out;
2904 }
2906 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2908 memset(ptr, 0, sizeof(struct els_plogi_payload));
2909 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2910 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2911 &ha->plogi_els_payld.data,
2912 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2914 elsio->u.els_plogi.els_cmd = els_opcode;
2915 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2917 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2918 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2919 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2921 rval = qla2x00_start_sp(sp);
2922 if (rval != QLA_SUCCESS) {
2923 rval = QLA_FUNCTION_FAILED;
2924 } else {
2925 ql_dbg(ql_dbg_disc, vha, 0x3074,
2926 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2927 sp->name, sp->handle, fcport->loop_id,
2928 fcport->d_id.b24, vha->d_id.b24);
2929 }
2931 if (wait) {
2932 wait_for_completion(&elsio->u.els_plogi.comp);
2934 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2935 rval = QLA_FUNCTION_FAILED;
2936 } else {
2937 goto done;
2938 }
2940 out:
2941 fcport->flags &= ~(FCF_ASYNC_SENT);
2942 if (elsio->u.els_plogi.els_plogi_pyld)
2943 dma_free_coherent(&sp->vha->hw->pdev->dev,
2944 elsio->u.els_plogi.tx_size,
2945 elsio->u.els_plogi.els_plogi_pyld,
2946 elsio->u.els_plogi.els_plogi_pyld_dma);
2948 if (elsio->u.els_plogi.els_resp_pyld)
2949 dma_free_coherent(&sp->vha->hw->pdev->dev,
2950 elsio->u.els_plogi.rx_size,
2951 elsio->u.els_plogi.els_resp_pyld,
2952 elsio->u.els_plogi.els_resp_pyld_dma);
2953 sp->free(sp);
2955 done:
2956 return rval;
2957 }
2959 static void
2960 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2961 {
2962 struct bsg_job *bsg_job = sp->u.bsg_job;
2963 struct fc_bsg_request *bsg_request = bsg_job->request;
2965 els_iocb->entry_type = ELS_IOCB_TYPE;
2966 els_iocb->entry_count = 1;
2967 els_iocb->sys_define = 0;
2968 els_iocb->entry_status = 0;
2969 els_iocb->handle = sp->handle;
2970 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2971 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2972 els_iocb->vp_index = sp->vha->vp_idx;
2973 els_iocb->sof_type = EST_SOFI3;
2974 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2976 els_iocb->opcode =
2977 sp->type == SRB_ELS_CMD_RPT ?
2978 bsg_request->rqst_data.r_els.els_code :
2979 bsg_request->rqst_data.h_els.command_code;
2980 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2981 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2982 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2983 els_iocb->control_flags = 0;
2984 els_iocb->rx_byte_count =
2985 cpu_to_le32(bsg_job->reply_payload.payload_len);
2986 els_iocb->tx_byte_count =
2987 cpu_to_le32(bsg_job->request_payload.payload_len);
2989 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2990 (bsg_job->request_payload.sg_list)));
2991 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2992 (bsg_job->request_payload.sg_list)));
2993 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2994 (bsg_job->request_payload.sg_list));
2996 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2997 (bsg_job->reply_payload.sg_list)));
2998 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2999 (bsg_job->reply_payload.sg_list)));
3000 els_iocb->rx_len = cpu_to_le32(sg_dma_len
3001 (bsg_job->reply_payload.sg_list));
3003 sp->vha->qla_stats.control_requests++;
3004 }
3006 static void
3007 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3008 {
3009 uint16_t avail_dsds;
3010 uint32_t *cur_dsd;
3011 struct scatterlist *sg;
3012 int index;
3013 uint16_t tot_dsds;
3014 scsi_qla_host_t *vha = sp->vha;
3015 struct qla_hw_data *ha = vha->hw;
3016 struct bsg_job *bsg_job = sp->u.bsg_job;
3017 int entry_count = 1;
3019 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3020 ct_iocb->entry_type = CT_IOCB_TYPE;
3021 ct_iocb->entry_status = 0;
3022 ct_iocb->handle1 = sp->handle;
3023 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3024 ct_iocb->status = cpu_to_le16(0);
3025 ct_iocb->control_flags = cpu_to_le16(0);
3026 ct_iocb->timeout = 0;
3027 ct_iocb->cmd_dsd_count =
3028 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3029 ct_iocb->total_dsd_count =
3030 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3031 ct_iocb->req_bytecount =
3032 cpu_to_le32(bsg_job->request_payload.payload_len);
3033 ct_iocb->rsp_bytecount =
3034 cpu_to_le32(bsg_job->reply_payload.payload_len);
3036 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
3037 (bsg_job->request_payload.sg_list)));
3038 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
3039 (bsg_job->request_payload.sg_list)));
3040 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
3042 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
3043 (bsg_job->reply_payload.sg_list)));
3044 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
3045 (bsg_job->reply_payload.sg_list)));
3046 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
3048 avail_dsds = 1;
3049 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
3050 index = 0;
3051 tot_dsds = bsg_job->reply_payload.sg_cnt;
3053 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3054 dma_addr_t sle_dma;
3055 cont_a64_entry_t *cont_pkt;
3057 /* Allocate additional continuation packets? */
3058 if (avail_dsds == 0) {
3059 /*
3060 * Five DSDs are available in the Cont.
3061 * Type 1 IOCB.
3062 */
3063 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3064 vha->hw->req_q_map[0]);
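/*
 * Only the descriptors embedded in the base IOCB are available up
 * front; once they are consumed, each Continuation Type 1 IOCB
 * appended here carries five more DSDs, and entry_count grows so the
 * firmware knows how many ring entries belong to this command.
 */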
3065 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3066 avail_dsds = 5;
3067 entry_count++;
3068 }
3070 sle_dma = sg_dma_address(sg);
3071 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3072 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3073 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3074 avail_dsds--;
3075 }
3076 ct_iocb->entry_count = entry_count;
3078 sp->vha->qla_stats.control_requests++;
3079 }
3081 static void
3082 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3083 {
3084 uint16_t avail_dsds;
3085 uint32_t *cur_dsd;
3086 struct scatterlist *sg;
3087 int index;
3088 uint16_t cmd_dsds, rsp_dsds;
3089 scsi_qla_host_t *vha = sp->vha;
3090 struct qla_hw_data *ha = vha->hw;
3091 struct bsg_job *bsg_job = sp->u.bsg_job;
3092 int entry_count = 1;
3093 cont_a64_entry_t *cont_pkt = NULL;
3095 ct_iocb->entry_type = CT_IOCB_TYPE;
3096 ct_iocb->entry_status = 0;
3097 ct_iocb->sys_define = 0;
3098 ct_iocb->handle = sp->handle;
3100 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3101 ct_iocb->vp_index = sp->vha->vp_idx;
3102 ct_iocb->comp_status = cpu_to_le16(0);
3104 cmd_dsds = bsg_job->request_payload.sg_cnt;
3105 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3107 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3108 ct_iocb->timeout = 0;
3109 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3110 ct_iocb->cmd_byte_count =
3111 cpu_to_le32(bsg_job->request_payload.payload_len);
3113 avail_dsds = 2;
3114 cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
3115 index = 0;
3117 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3118 dma_addr_t sle_dma;
3120 /* Allocate additional continuation packets? */
3121 if (avail_dsds == 0) {
3122 /*
3123 * Five DSDs are available in the Cont.
3124 * Type 1 IOCB.
3125 */
3126 cont_pkt = qla2x00_prep_cont_type1_iocb(
3127 vha, ha->req_q_map[0]);
3128 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3129 avail_dsds = 5;
3130 entry_count++;
3131 }
3133 sle_dma = sg_dma_address(sg);
3134 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3135 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3136 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3137 avail_dsds--;
3138 }
3140 index = 0;
3142 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3143 dma_addr_t sle_dma;
3145 /* Allocate additional continuation packets? */
3146 if (avail_dsds == 0) {
3147 /*
3148 * Five DSDs are available in the Cont.
3149 * Type 1 IOCB.
3150 */
3151 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3152 ha->req_q_map[0]);
3153 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3154 avail_dsds = 5;
3155 entry_count++;
3156 }
3158 sle_dma = sg_dma_address(sg);
3159 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3160 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3161 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3162 avail_dsds--;
3163 }
3164 ct_iocb->entry_count = entry_count;
3165 }
3167 /*
3168 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3169 * @sp: command to send to the ISP
3170 *
3171 * Returns non-zero if a failure occurred, else zero.
3172 */
3173 int
3174 qla82xx_start_scsi(srb_t *sp)
3175 {
3176 int nseg;
3177 unsigned long flags;
3178 struct scsi_cmnd *cmd;
3179 uint32_t *clr_ptr;
3180 uint32_t index;
3181 uint32_t handle;
3182 uint16_t cnt;
3183 uint16_t req_cnt;
3184 uint16_t tot_dsds;
3185 struct device_reg_82xx __iomem *reg;
3186 uint32_t dbval;
3187 uint32_t *fcp_dl;
3188 uint8_t additional_cdb_len;
3189 struct ct6_dsd *ctx;
3190 struct scsi_qla_host *vha = sp->vha;
3191 struct qla_hw_data *ha = vha->hw;
3192 struct req_que *req = NULL;
3193 struct rsp_que *rsp = NULL;
3195 /* Setup device pointers. */
3196 reg = &ha->iobase->isp82;
3197 cmd = GET_CMD_SP(sp);
3198 req = vha->req;
3199 rsp = ha->rsp_q_map[0];
3201 /* So we know we haven't pci_map'ed anything yet */
3202 tot_dsds = 0;
3204 dbval = 0x04 | (ha->portnum << 5);
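/*
 * Doorbell value layout, as assembled in this function: 0x04 selects
 * the request-queue doorbell type, the port number sits at bit 5, and
 * req->id and ring_index are OR-ed in at bits 8 and 16 just before
 * the value is written out below.
 */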
3206 /* Send marker if required */
3207 if (vha->marker_needed != 0) {
3208 if (qla2x00_marker(vha, ha->base_qpair,
3209 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3210 ql_log(ql_log_warn, vha, 0x300c,
3211 "qla2x00_marker failed for cmd=%p.\n", cmd);
3212 return QLA_FUNCTION_FAILED;
3213 }
3214 vha->marker_needed = 0;
3215 }
3217 /* Acquire ring specific lock */
3218 spin_lock_irqsave(&ha->hardware_lock, flags);
3220 /* Check for room in outstanding command list. */
3221 handle = req->current_outstanding_cmd;
3222 for (index = 1; index < req->num_outstanding_cmds; index++) {
3223 handle++;
3224 if (handle == req->num_outstanding_cmds)
3225 handle = 1;
3226 if (!req->outstanding_cmds[handle])
3227 break;
3228 }
3229 if (index == req->num_outstanding_cmds)
3230 goto queuing_error;
3232 /* Map the sg table so we have an accurate count of sg entries needed */
3233 if (scsi_sg_count(cmd)) {
3234 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3235 scsi_sg_count(cmd), cmd->sc_data_direction);
3236 if (unlikely(!nseg))
3237 goto queuing_error;
3238 } else
3239 nseg = 0;
3241 tot_dsds = nseg;
3243 if (tot_dsds > ql2xshiftctondsd) {
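/*
 * Large scatter lists (more than ql2xshiftctondsd segments, 6 by
 * default) take the Command Type 6 path, which hands the firmware a
 * chained DSD list and a separately DMA-mapped FCP_CMND; smaller
 * commands use the simpler inline Command Type 7 format below.
 */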
3244 struct cmd_type_6 *cmd_pkt;
3245 uint16_t more_dsd_lists = 0;
3246 struct dsd_dma *dsd_ptr;
3247 uint16_t i;
3249 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3250 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3251 ql_dbg(ql_dbg_io, vha, 0x300d,
3252 "Num of DSD list %d is than %d for cmd=%p.\n",
3253 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3258 if (more_dsd_lists <= ha->gbl_dsd_avail)
3259 goto sufficient_dsds;
3261 more_dsd_lists -= ha->gbl_dsd_avail;
3263 for (i = 0; i < more_dsd_lists; i++) {
3264 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3265 if (!dsd_ptr) {
3266 ql_log(ql_log_fatal, vha, 0x300e,
3267 "Failed to allocate memory for dsd_dma "
3268 "for cmd=%p.\n", cmd);
3269 goto queuing_error;
3270 }
3272 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3273 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3274 if (!dsd_ptr->dsd_addr) {
3275 kfree(dsd_ptr);
3276 ql_log(ql_log_fatal, vha, 0x300f,
3277 "Failed to allocate memory for dsd_addr "
3278 "for cmd=%p.\n", cmd);
3279 goto queuing_error;
3280 }
3281 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3282 ha->gbl_dsd_avail++;
3283 }
3285 sufficient_dsds:
3286 req_cnt = 1;
3288 if (req->cnt < (req_cnt + 2)) {
3289 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3290 &reg->req_q_out[0]);
3291 if (req->ring_index < cnt)
3292 req->cnt = cnt - req->ring_index;
3293 else
3294 req->cnt = req->length -
3295 (req->ring_index - cnt);
3296 if (req->cnt < (req_cnt + 2))
3297 goto queuing_error;
3298 }
3300 ctx = sp->u.scmd.ctx =
3301 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3302 if (!ctx) {
3303 ql_log(ql_log_fatal, vha, 0x3010,
3304 "Failed to allocate ctx for cmd=%p.\n", cmd);
3305 goto queuing_error;
3306 }
3308 memset(ctx, 0, sizeof(struct ct6_dsd));
3309 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3310 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3311 if (!ctx->fcp_cmnd) {
3312 ql_log(ql_log_fatal, vha, 0x3011,
3313 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3317 /* Initialize the DSD list and dma handle */
3318 INIT_LIST_HEAD(&ctx->dsd_list);
3319 ctx->dsd_use_cnt = 0;
3321 if (cmd->cmd_len > 16) {
3322 additional_cdb_len = cmd->cmd_len - 16;
3323 if ((cmd->cmd_len % 4) != 0) {
3324 /* SCSI command bigger than 16 bytes must be
3325 * multiple of 4
3326 */
3327 ql_log(ql_log_warn, vha, 0x3012,
3328 "scsi cmd len %d not multiple of 4 "
3329 "for cmd=%p.\n", cmd->cmd_len, cmd);
3330 goto queuing_error_fcp_cmnd;
3331 }
3332 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3333 } else {
3334 additional_cdb_len = 0;
3335 ctx->fcp_cmnd_len = 12 + 16 + 4;
3336 }
3338 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3339 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3341 /* Zero out remaining portion of packet. */
3342 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3343 clr_ptr = (uint32_t *)cmd_pkt + 2;
3344 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3345 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3347 /* Set NPORT-ID and LUN number*/
3348 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3349 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3350 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3351 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3352 cmd_pkt->vp_index = sp->vha->vp_idx;
3354 /* Build IOCB segments */
3355 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3356 goto queuing_error_fcp_cmnd;
3358 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3359 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3361 /* build FCP_CMND IU */
3362 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3363 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3365 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3366 ctx->fcp_cmnd->additional_cdb_len |= 1;
3367 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3368 ctx->fcp_cmnd->additional_cdb_len |= 2;
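/*
 * The CDB length was verified to be a multiple of four above, so the
 * two low bits of additional_cdb_len are free to carry the FCP_CMND
 * direction flags: bit 0 = write data, bit 1 = read data.
 */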
3370 /* Populate the FCP_PRIO. */
3371 if (ha->flags.fcp_prio_enabled)
3372 ctx->fcp_cmnd->task_attribute |=
3373 sp->fcport->fcp_prio << 3;
3375 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3377 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3378 additional_cdb_len);
3379 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
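/*
 * fcp_dl, the big-endian FCP data length, lives in the four bytes
 * immediately after the (possibly extended) CDB, which is why
 * fcp_cmnd_len was computed as 12 + CDB length + 4.
 */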
3381 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3382 cmd_pkt->fcp_cmnd_dseg_address[0] =
3383 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3384 cmd_pkt->fcp_cmnd_dseg_address[1] =
3385 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3387 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3388 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3389 /* Set total data segment count. */
3390 cmd_pkt->entry_count = (uint8_t)req_cnt;
3391 /* Specify response queue number where
3392 * completion should happen
3393 */
3394 cmd_pkt->entry_status = (uint8_t) rsp->id;
3395 } else {
3396 struct cmd_type_7 *cmd_pkt;
3398 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3399 if (req->cnt < (req_cnt + 2)) {
3400 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3401 &reg->req_q_out[0]);
3402 if (req->ring_index < cnt)
3403 req->cnt = cnt - req->ring_index;
3404 else
3405 req->cnt = req->length -
3406 (req->ring_index - cnt);
3407 }
3408 if (req->cnt < (req_cnt + 2))
3409 goto queuing_error;
3411 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3412 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3414 /* Zero out remaining portion of packet. */
3415 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3416 clr_ptr = (uint32_t *)cmd_pkt + 2;
3417 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3418 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3420 /* Set NPORT-ID and LUN number*/
3421 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3422 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3423 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3424 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3425 cmd_pkt->vp_index = sp->vha->vp_idx;
3427 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3428 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3429 sizeof(cmd_pkt->lun));
3431 /* Populate the FCP_PRIO. */
3432 if (ha->flags.fcp_prio_enabled)
3433 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3435 /* Load SCSI command packet. */
3436 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3437 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3439 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3441 /* Build IOCB segments */
3442 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3444 /* Set total data segment count. */
3445 cmd_pkt->entry_count = (uint8_t)req_cnt;
3446 /* Specify response queue number where
3447 * completion should happen.
3448 */
3449 cmd_pkt->entry_status = (uint8_t) rsp->id;
3450 }
3452 /* Build command packet. */
3453 req->current_outstanding_cmd = handle;
3454 req->outstanding_cmds[handle] = sp;
3455 sp->handle = handle;
3456 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3457 req->cnt -= req_cnt;
3458 wmb();
3460 /* Adjust ring index. */
3461 req->ring_index++;
3462 if (req->ring_index == req->length) {
3463 req->ring_index = 0;
3464 req->ring_ptr = req->ring;
3465 } else
3466 req->ring_ptr++;
3468 sp->flags |= SRB_DMA_VALID;
3470 /* Set chip new ring index. */
3471 /* write, read and verify logic */
3472 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3473 if (ql2xdbwr)
3474 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3475 else {
3476 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3477 wmb();
3478 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3479 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3480 wmb();
3481 }
3482 }
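/*
 * ISP82xx doorbell writes go through this write/read/verify loop: the
 * value is re-posted until a readback matches, apparently to guard
 * against a dropped doorbell write on this hardware.
 */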
3484 /* Manage unprocessed RIO/ZIO commands in response queue. */
3485 if (vha->flags.process_response_queue &&
3486 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3487 qla24xx_process_response_queue(vha, rsp);
3489 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3490 return QLA_SUCCESS;
3492 queuing_error_fcp_cmnd:
3493 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3494 queuing_error:
3495 if (tot_dsds)
3496 scsi_dma_unmap(cmd);
3498 if (sp->u.scmd.ctx) {
3499 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3500 sp->u.scmd.ctx = NULL;
3501 }
3502 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3504 return QLA_FUNCTION_FAILED;
3505 }
3507 static void
3508 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3509 {
3510 struct srb_iocb *aio = &sp->u.iocb_cmd;
3511 scsi_qla_host_t *vha = sp->vha;
3512 struct req_que *req = sp->qpair->req;
3514 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3515 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3516 abt_iocb->entry_count = 1;
3517 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3519 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3520 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3521 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3522 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3524 abt_iocb->handle_to_abort =
3525 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3526 aio->u.abt.cmd_hndl));
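/*
 * handle_to_abort packs the request-queue number into the upper half
 * and the target command's per-queue handle into the lower half via
 * MAKE_HANDLE, mirroring how the handle was built when that command
 * was queued.
 */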
3527 abt_iocb->vp_index = vha->vp_idx;
3528 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3529 /* Send the command to the firmware */
3530 wmb();
3531 }
3533 static void
3534 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3535 {
3536 int i, sz;
3538 mbx->entry_type = MBX_IOCB_TYPE;
3539 mbx->handle = sp->handle;
3540 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3542 for (i = 0; i < sz; i++)
3543 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3544 }
3546 static void
3547 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3548 {
3549 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3550 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3551 ct_pkt->handle = sp->handle;
3552 }
3554 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3555 struct nack_to_isp *nack)
3556 {
3557 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3559 nack->entry_type = NOTIFY_ACK_TYPE;
3560 nack->entry_count = 1;
3561 nack->ox_id = ntfy->ox_id;
3563 nack->u.isp24.handle = sp->handle;
3564 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3565 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3566 nack->u.isp24.flags = ntfy->u.isp24.flags &
3567 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3568 }
3569 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3570 nack->u.isp24.status = ntfy->u.isp24.status;
3571 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3572 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3573 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3574 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3575 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3576 nack->u.isp24.srr_flags = 0;
3577 nack->u.isp24.srr_reject_code = 0;
3578 nack->u.isp24.srr_reject_code_expl = 0;
3579 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3580 }
3582 /*
3583 * Build NVME LS request
3584 */
3585 static int
3586 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3587 {
3588 struct srb_iocb *nvme;
3589 int rval = QLA_SUCCESS;
3591 nvme = &sp->u.iocb_cmd;
3592 cmd_pkt->entry_type = PT_LS4_REQUEST;
3593 cmd_pkt->entry_count = 1;
3594 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3596 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3597 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3598 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3600 cmd_pkt->tx_dseg_count = 1;
3601 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3602 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3603 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3604 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3606 cmd_pkt->rx_dseg_count = 1;
3607 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3608 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3609 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3610 cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3612 return rval;
3613 }
3615 static void
3616 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3617 {
3618 int map, pos;
3620 vce->entry_type = VP_CTRL_IOCB_TYPE;
3621 vce->handle = sp->handle;
3622 vce->entry_count = 1;
3623 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3624 vce->vp_count = cpu_to_le16(1);
3626 /*
3627 * index map in firmware starts with 1; decrement index
3628 * this is ok as we never use index 0
3629 */
3630 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3631 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
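/*
 * vp_idx_map is a bitmap with VP index 1 at byte 0, bit 0. For
 * example, vp_index 9 gives map = (9 - 1) / 8 = 1 and
 * pos = (9 - 1) & 7 = 0, so bit 0 of byte 1 is set.
 */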
3632 vce->vp_idx_map[map] |= 1 << pos;
3633 }
3635 static void
3636 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3637 {
3638 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3639 logio->control_flags =
3640 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3642 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3643 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3644 logio->port_id[1] = sp->fcport->d_id.b.area;
3645 logio->port_id[2] = sp->fcport->d_id.b.domain;
3646 logio->vp_index = sp->fcport->vha->vp_idx;
3647 }
3649 int
3650 qla2x00_start_sp(srb_t *sp)
3651 {
3652 int rval = QLA_SUCCESS;
3653 scsi_qla_host_t *vha = sp->vha;
3654 struct qla_hw_data *ha = vha->hw;
3655 struct qla_qpair *qp = sp->qpair;
3656 void *pkt;
3657 unsigned long flags;
3659 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3660 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3661 if (!pkt) {
3662 rval = EAGAIN;
3663 ql_log(ql_log_warn, vha, 0x700c,
3664 "qla2x00_alloc_iocbs failed.\n");
3665 goto done;
3666 }
3668 switch (sp->type) {
3669 case SRB_LOGIN_CMD:
3670 IS_FWI2_CAPABLE(ha) ?
3671 qla24xx_login_iocb(sp, pkt) :
3672 qla2x00_login_iocb(sp, pkt);
3673 break;
3674 case SRB_PRLI_CMD:
3675 qla24xx_prli_iocb(sp, pkt);
3676 break;
3677 case SRB_LOGOUT_CMD:
3678 IS_FWI2_CAPABLE(ha) ?
3679 qla24xx_logout_iocb(sp, pkt) :
3680 qla2x00_logout_iocb(sp, pkt);
3681 break;
3682 case SRB_ELS_CMD_RPT:
3683 case SRB_ELS_CMD_HST:
3684 qla24xx_els_iocb(sp, pkt);
3685 break;
3686 case SRB_CT_CMD:
3687 IS_FWI2_CAPABLE(ha) ?
3688 qla24xx_ct_iocb(sp, pkt) :
3689 qla2x00_ct_iocb(sp, pkt);
3690 break;
3691 case SRB_ADISC_CMD:
3692 IS_FWI2_CAPABLE(ha) ?
3693 qla24xx_adisc_iocb(sp, pkt) :
3694 qla2x00_adisc_iocb(sp, pkt);
3695 break;
3696 case SRB_TM_CMD:
3697 IS_QLAFX00(ha) ?
3698 qlafx00_tm_iocb(sp, pkt) :
3699 qla24xx_tm_iocb(sp, pkt);
3700 break;
3701 case SRB_FXIOCB_DCMD:
3702 case SRB_FXIOCB_BCMD:
3703 qlafx00_fxdisc_iocb(sp, pkt);
3704 break;
3705 case SRB_NVME_LS:
3706 qla_nvme_ls(sp, pkt);
3707 break;
3708 case SRB_ABT_CMD:
3709 IS_QLAFX00(ha) ?
3710 qlafx00_abort_iocb(sp, pkt) :
3711 qla24xx_abort_iocb(sp, pkt);
3712 break;
3713 case SRB_ELS_DCMD:
3714 qla24xx_els_logo_iocb(sp, pkt);
3715 break;
3716 case SRB_CT_PTHRU_CMD:
3717 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3718 break;
3719 case SRB_MB_IOCB:
3720 qla2x00_mb_iocb(sp, pkt);
3721 break;
3722 case SRB_NACK_PLOGI:
3723 case SRB_NACK_PRLI:
3724 case SRB_NACK_LOGO:
3725 qla2x00_send_notify_ack_iocb(sp, pkt);
3726 break;
3727 case SRB_CTRL_VP:
3728 qla25xx_ctrlvp_iocb(sp, pkt);
3729 break;
3730 case SRB_PRLO_CMD:
3731 qla24xx_prlo_iocb(sp, pkt);
3732 break;
3733 default:
3734 break;
3735 }
3737 wmb();
3738 qla2x00_start_iocbs(vha, qp->req);
3739 done:
3740 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3741 return rval;
3742 }
3744 static void
3745 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3746 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3747 {
3748 uint16_t avail_dsds;
3749 uint32_t *cur_dsd;
3750 uint32_t req_data_len = 0;
3751 uint32_t rsp_data_len = 0;
3752 struct scatterlist *sg;
3753 int index;
3754 int entry_count = 1;
3755 struct bsg_job *bsg_job = sp->u.bsg_job;
3757 /*Update entry type to indicate bidir command */
3758 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3760 /* Set the transfer direction; in this case set both flags.
3761 * Also set the BD_WRAP_BACK flag; firmware will take care of
3762 * assigning DID=SID for outgoing pkts.
3763 */
3764 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3765 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3766 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3767 BD_WRAP_BACK);
3769 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3770 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3771 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3772 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3774 vha->bidi_stats.transfer_bytes += req_data_len;
3775 vha->bidi_stats.io_count++;
3777 vha->qla_stats.output_bytes += req_data_len;
3778 vha->qla_stats.output_requests++;
3780 /* Only one dsd is available for bidirectional IOCB, remaining dsds
3781 * are bundled in continuation iocb
3782 */
3783 avail_dsds = 1;
3784 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3785 index = 0;
3788 for_each_sg(bsg_job->request_payload.sg_list, sg,
3789 bsg_job->request_payload.sg_cnt, index) {
3790 dma_addr_t sle_dma;
3791 cont_a64_entry_t *cont_pkt;
3793 /* Allocate additional continuation packets */
3794 if (avail_dsds == 0) {
3795 /* Continuation type 1 IOCB can accommodate
3796 * five DSDS
3797 */
3798 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3799 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3800 avail_dsds = 5;
3801 entry_count++;
3802 }
3803 sle_dma = sg_dma_address(sg);
3804 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3805 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3806 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3807 avail_dsds--;
3808 }
3809 /* For a read request, the DSD always goes to a continuation IOCB
3810 * and follows the write DSD. If there is room on the current IOCB
3811 * then it is added to that IOCB; else a new continuation IOCB is
3812 * allocated.
3813 */
3814 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3815 bsg_job->reply_payload.sg_cnt, index) {
3816 dma_addr_t sle_dma;
3817 cont_a64_entry_t *cont_pkt;
3819 /* Allocate additional continuation packets */
3820 if (avail_dsds == 0) {
3821 /* Continuation type 1 IOCB can accommodate
3822 * five DSDS
3823 */
3824 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3825 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3826 avail_dsds = 5;
3827 entry_count++;
3828 }
3829 sle_dma = sg_dma_address(sg);
3830 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3831 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3832 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3833 avail_dsds--;
3834 }
3835 /* This value should be same as number of IOCB required for this cmd */
3836 cmd_pkt->entry_count = entry_count;
3837 }
3839 int
3840 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3841 {
3843 struct qla_hw_data *ha = vha->hw;
3844 unsigned long flags;
3845 uint32_t handle;
3846 uint32_t index;
3847 uint16_t req_cnt;
3848 uint16_t cnt;
3849 uint32_t *clr_ptr;
3850 struct cmd_bidir *cmd_pkt = NULL;
3851 struct rsp_que *rsp;
3852 struct req_que *req;
3853 int rval = EXT_STATUS_OK;
3855 rval = QLA_SUCCESS;
3857 rsp = ha->rsp_q_map[0];
3858 req = vha->req;
3860 /* Send marker if required */
3861 if (vha->marker_needed != 0) {
3862 if (qla2x00_marker(vha, ha->base_qpair,
3863 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3864 return EXT_STATUS_MAILBOX;
3865 vha->marker_needed = 0;
3866 }
3868 /* Acquire ring specific lock */
3869 spin_lock_irqsave(&ha->hardware_lock, flags);
3871 /* Check for room in outstanding command list. */
3872 handle = req->current_outstanding_cmd;
3873 for (index = 1; index < req->num_outstanding_cmds; index++) {
3874 handle++;
3875 if (handle == req->num_outstanding_cmds)
3876 handle = 1;
3877 if (!req->outstanding_cmds[handle])
3878 break;
3879 }
3881 if (index == req->num_outstanding_cmds) {
3882 rval = EXT_STATUS_BUSY;
3883 goto queuing_error;
3884 }
3886 /* Calculate number of IOCB required */
3887 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3889 /* Check for room on request queue. */
3890 if (req->cnt < req_cnt + 2) {
3891 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3892 RD_REG_DWORD_RELAXED(req->req_q_out);
3893 if (req->ring_index < cnt)
3894 req->cnt = cnt - req->ring_index;
3895 else
3896 req->cnt = req->length -
3897 (req->ring_index - cnt);
3898 }
3899 if (req->cnt < req_cnt + 2) {
3900 rval = EXT_STATUS_BUSY;
3901 goto queuing_error;
3902 }
3904 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3905 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3907 /* Zero out remaining portion of packet. */
3908 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3909 clr_ptr = (uint32_t *)cmd_pkt + 2;
3910 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3912 /* Set NPORT-ID (of vha)*/
3913 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3914 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3915 cmd_pkt->port_id[1] = vha->d_id.b.area;
3916 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3918 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3919 cmd_pkt->entry_status = (uint8_t) rsp->id;
3920 /* Build command packet. */
3921 req->current_outstanding_cmd = handle;
3922 req->outstanding_cmds[handle] = sp;
3923 sp->handle = handle;
3924 req->cnt -= req_cnt;
3926 /* Send the command to the firmware */
3927 wmb();
3928 qla2x00_start_iocbs(vha, req);
3929 queuing_error:
3930 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3931 return rval;
3932 }