/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cf;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cf = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cf = CF_WRITE_DATA;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cf = CF_READ_DATA;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cf);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
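/*
 * Editor's illustration (not driver code): the IOCB-count arithmetic above
 * in worked form. A Command Type 2 IOCB embeds 3 DSDs with 7 more per
 * Continuation Type 0 IOCB; a Command Type 3 IOCB embeds 2 DSDs with 5
 * more per Continuation Type 1 IOCB. Names here are hypothetical.
 */
static inline uint16_t sketch_calc_iocbs(uint16_t dsds, uint16_t in_cmd,
    uint16_t per_cont)
{
	uint16_t iocbs = 1;	/* the command IOCB itself */

	if (dsds > in_cmd) {
		iocbs += (dsds - in_cmd) / per_cont;
		if ((dsds - in_cmd) % per_cont)
			iocbs++;	/* round up for a partial continuation */
	}
	return iocbs;
}
/* e.g. sketch_calc_iocbs(10, 3, 7) == 2 and sketch_calc_iocbs(11, 3, 7) == 3 */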
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
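/*
 * Editor's sketch of the request-ring advance used by the two helpers
 * above and repeated throughout this file: the ring is a fixed array of
 * request entries, so stepping past the end wraps both the index and the
 * entry pointer back to the base. Field names mirror struct req_que but
 * the struct itself is a hypothetical stand-in.
 */
struct sketch_ring {
	uint16_t ring_index;	/* driver's in-pointer */
	uint16_t length;	/* entries in the ring */
	void *ring;		/* base of the entry array */
	void *ring_ptr;		/* current entry */
	size_t entry_size;
};

static inline void sketch_ring_advance(struct sketch_ring *r)
{
	r->ring_index++;
	if (r->ring_index == r->length) {
		r->ring_index = 0;
		r->ring_ptr = r->ring;	/* wrap to the start */
	} else {
		r->ring_ptr = (char *)r->ring_ptr + r->entry_size;
	}
}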
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
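/*
 * Editor's sketch of the PASS-op branch above: with a DIX IP guard the
 * initiator supplied an IP checksum instead of the T10 CRC, so the
 * firmware is asked to handle the checksum form; otherwise protection
 * data passes through unchanged. The enum values are illustrative
 * stand-ins, not the driver's PO_* encodings.
 */
enum sketch_pass_mode { SKETCH_DIF_PASS = 0, SKETCH_DIF_TCP_CKSUM = 1 };

static inline enum sketch_pass_mode sketch_pick_pass_mode(uint8_t guard,
    uint8_t dix_guard_ip_bit)
{
	return (guard & dix_guard_ip_bit) ?
	    SKETCH_DIF_TCP_CKSUM : SKETCH_DIF_PASS;
}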
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
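/*
 * Editor's sketch of the LSD()/MSD() split used when loading 64-bit DSDs
 * above: the firmware consumes a DMA address as two little-endian 32-bit
 * words, low word first, followed by the byte count. The helper name is
 * hypothetical; cpu_to_le32() is the kernel's usual byte-order helper.
 */
static inline void sketch_load_dsd64(uint32_t **cur_dsd, uint64_t dma,
    uint32_t len)
{
	*(*cur_dsd)++ = cpu_to_le32((uint32_t)(dma & 0xffffffff));	/* LSD */
	*(*cur_dsd)++ = cpu_to_le32((uint32_t)(dma >> 32));		/* MSD */
	*(*cur_dsd)++ = cpu_to_le32(len);
}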
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}
	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
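/*
 * Editor's sketch of the queue-room check above: free entries are derived
 * from the distance between the driver's in-pointer (ring_index) and the
 * hardware's out-pointer, modulo the ring length; callers then demand
 * req_cnt + 2 so the in-pointer never catches the out-pointer and
 * "in == out" always means empty. The helper name is hypothetical.
 */
static inline uint16_t sketch_ring_room(uint16_t ring_index, uint16_t out,
    uint16_t length)
{
	return (ring_index < out) ? out - ring_index
				  : length - (ring_index - out);
}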
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
static int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
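/*
 * Editor's sketch of the chained-DSD sizing above, assuming the upstream
 * header values QLA_DSDS_PER_IOCB == 37 and QLA_DSD_SIZE == 12: each
 * external list holds up to 37 data descriptors plus one extra 12-byte
 * slot that either links to the next list or null-terminates the chain,
 * hence the "(avail_dsds + 1)" in the allocation.
 */
enum { SKETCH_DSDS_PER_LIST = 37, SKETCH_DSD_SIZE = 12 };

static inline uint32_t sketch_dsd_list_bytes(uint16_t dsds_this_list)
{
	return (dsds_this_list + 1) * SKETCH_DSD_SIZE;
}
/* e.g. a full list is sketch_dsd_list_bytes(37) == 456 bytes */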
/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
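/*
 * Editor's note: the divide-then-round-up above is the usual ceiling
 * division; an equivalent one-liner, again assuming
 * QLA_DSDS_PER_IOCB == 37, is sketched below.
 * e.g. 74 DSDs -> 2 lists, 75 DSDs -> 3 lists.
 */
static inline uint16_t sketch_calc_dsd_lists(uint16_t dsds)
{
	return (dsds + 37 - 1) / 37;
}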
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
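/*
 * Editor's sketch of the 8-byte T10 DIF tuple the tags above guard, one
 * per logical block: a 16-bit CRC guard, a 16-bit application tag, and a
 * 32-bit reference tag (the lower 32 bits of the LBA for Type 1/2). The
 * struct name is hypothetical; layout follows the T10 DIF format.
 */
struct sketch_dif_tuple {
	__be16 guard;		/* CRC over the data block */
	__be16 app_tag;		/* application-defined */
	__be32 ref_tag;		/* typically low 32 bits of the LBA */
};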
static inline int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
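/*
 * Editor's sketch of the chunking rule above: the walker hands out at
 * most one protection interval (blk_sz) per call, taking either the rest
 * of the current scatterlist element or whatever is still needed to
 * finish the block, whichever is smaller. Names are hypothetical.
 */
struct sketch_seg { uint64_t addr; uint32_t len; };

/* Returns how many bytes of |seg| to consume toward the current block. */
static inline uint32_t sketch_take(const struct sketch_seg *seg,
    uint32_t consumed, uint32_t *fill, uint32_t blk_sz)
{
	uint32_t left = seg->len - consumed;	/* bytes left in element */
	uint32_t want = blk_sz - *fill;		/* bytes left in block */
	uint32_t take = left < want ? left : want;

	*fill = (*fill + take) % blk_sz;	/* 0 means block complete */
	return take;
}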
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	uint i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
		    __func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			dma_addr_t sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_ptr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
			    DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				*cur_dsd++ =
				    cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
				*cur_dsd++ =
				    cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
				*cur_dsd++ = dsd_list_len;
				cur_dsd = dsd_ptr->dsd_addr;
			}
			*cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(sglen);
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			dma_addr_t sle_dma;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &(tc->ctx->dsd_list));
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				*cur_dsd++ =
				    cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
				*cur_dsd++ =
				    cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
				*cur_dsd++ = dsd_list_len;
				cur_dsd = dsd_ptr->dsd_addr;
			}
			sle_dma = sg_dma_address(sg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
			avail_dsds--;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
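/*
 * Editor's sketch of the 4 GB boundary test used above: an SGE needs
 * local DMA bounce-buffering when its start and end addresses differ in
 * their upper 32 bits, i.e. the transfer would wrap a 32-bit DMA window.
 * This mirrors the MSD(sle_phys + len) ^ MSD(sle_phys) expression.
 */
static inline bool sketch_crosses_4g(uint64_t addr, uint32_t len)
{
	return (uint32_t)((addr + len) >> 32) != (uint32_t)(addr >> 32);
}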
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
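	/*
	 * Editor's sizing note on the FCP_CMND built above: 8 bytes of LUN
	 * plus 4 control bytes precede the CDB (16 bytes, or cmd_len when
	 * longer), and the 4-byte big-endian FCP_DL transfer length
	 * follows the CDB. That is why fcp_cmnd_len = 12 + cdb_len + 4,
	 * and why fcp_dl below is located at cdb + 16 +
	 * additional_fcpcdb_len.
	 */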
	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}
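	/*
	 * Editor's worked example of the DIF length math above (assuming
	 * 512-byte sectors): a 4 KB transfer covers 8 blocks, so
	 * dif_bytes = 8 * 8 = 64; for the PASS and on-the-wire
	 * STRIP/INSERT cases the fabric then moves
	 * total_bytes = 4096 + 64 = 4160 bytes.
	 */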
	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
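/*
 * Editor's sketch of the outstanding-command handle search used by all of
 * the start_scsi variants above: scan circularly from the last handle
 * issued; handle 0 is reserved, so the walk covers indices 1..num-1.
 * The helper name is hypothetical.
 */
static inline uint32_t sketch_find_handle(srb_t **cmds, uint32_t num,
    uint32_t last)
{
	uint32_t index, handle = last;

	for (index = 1; index < num; index++) {
		handle++;
		if (handle == num)
			handle = 1;	/* wrap, skipping reserved slot 0 */
		if (!cmds[handle])
			return handle;
	}
	return 0;	/* no free slot */
}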
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or CDBs >16 bytes in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;

	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
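/*
 * Editor's sketch of the failure rollback pattern above: queue space and
 * the handle are reclaimed only when QDSS_GOT_Q_SPACE proves they were
 * actually taken; DMA unmapping is deliberately left to the caller's
 * queuecommand error path. The helper name is hypothetical.
 */
static inline void sketch_rollback(struct req_que *req, uint32_t handle,
    uint16_t req_cnt, uint32_t status)
{
	if (status & QDSS_GOT_Q_SPACE) {	/* resources were reserved */
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
}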
/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}
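/*
 * Editor's note on the MQ variant above: it differs from
 * qla24xx_start_scsi() chiefly in locking. Each qla_qpair owns a request
 * queue and its own qp_lock, so submissions on different queues do not
 * serialize on hardware_lock. A hedged sketch of the submit envelope:
 */
static inline void sketch_mq_submit(struct qla_qpair *qpair)
{
	unsigned long flags;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	/* ... build the IOCB on qpair->req, bump the ring, ring req_q_in ... */
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}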
/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;
	struct qla_qpair	*qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or CDBs >16 bytes in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;

	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_FUNCTION_FAILED;
}
/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */
void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (sp && (sp->type != SRB_SCSI_CMD)) {
		/* Adjust entry-counts as needed. */
		req_cnt = sp->iocbs;
	}

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (qpair->use_shadow_reg)
			cnt = *req->out_ptr;
		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < req_cnt + 2)
			goto queuing_error;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Prep packet. */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

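/**
 * qla2x00_alloc_iocbs_ready() - Allocate IOCB space when the adapter is ready.
 * @qpair: queue pair whose request queue the entries come from
 * @sp: SRB the entries are being allocated for
 *
 * Returns NULL while a reset is active, otherwise the allocated packet.
 */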
void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

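/**
 * qla2x00_alloc_iocbs() - Allocate IOCB space on the base queue pair.
 * @vha: host adapter the request is issued through
 * @sp: SRB the entries are being allocated for
 */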
void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}

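/**
 * qla24xx_prli_iocb() - Build a PRLI Login/Logout Port IOCB.
 * @sp: SRB carrying the PRLI request
 * @logio: IOCB packet to initialize
 */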
static void
qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

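/**
 * qla24xx_login_iocb() - Build a PLOGI/PRLI Login IOCB for FWI2-capable ISPs.
 * @sp: SRB carrying the login request
 * @logio: IOCB packet to initialize
 */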
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
		logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	} else {
		logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
		if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
			logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
		if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
			logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	}
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

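/**
 * qla2x00_login_iocb() - Build a fabric-login mailbox IOCB for legacy ISPs.
 * @sp: SRB carrying the login request
 * @mbx: mailbox IOCB entry to initialize
 */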
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

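/**
 * qla24xx_logout_iocb() - Build a Logout Port IOCB for FWI2-capable ISPs.
 * @sp: SRB carrying the logout request
 * @logio: IOCB packet to initialize
 */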
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	if (!sp->fcport->keep_nport_handle)
		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

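/**
 * qla2x00_logout_iocb() - Build a fabric-logout mailbox IOCB for legacy ISPs.
 * @sp: SRB carrying the logout request
 * @mbx: mailbox IOCB entry to initialize
 */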
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

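/**
 * qla24xx_adisc_iocb() - Build an ADISC Login/Logout Port IOCB.
 * @sp: SRB carrying the ADISC request
 * @logio: IOCB packet to initialize
 */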
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->vha->vp_idx;
}

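/**
 * qla2x00_adisc_iocb() - Build a Get Port Database mailbox IOCB, used in
 * place of ADISC to verify a port on legacy ISPs.
 * @sp: SRB carrying the ADISC request
 * @mbx: mailbox IOCB entry to initialize
 */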
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

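/**
 * qla24xx_tm_iocb() - Build a Task Management IOCB.
 * @sp: SRB carrying the task-management request
 * @tsk: IOCB packet to initialize
 */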
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint64_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}

static void
qla2x00_els_dcmd_sp_free(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;

	kfree(sp->fcport);

	if (elsio->u.els_logo.els_logo_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_logo.els_logo_pyld,
		    elsio->u.els_logo.els_logo_pyld_dma);

	del_timer(&elsio->timer);
	qla2x00_rel_sp(sp);
}

static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_io, vha, 0x3069,
	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	complete(&lio->u.els_logo.comp);
}

static void
qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	ql_dbg(ql_dbg_io, vha, 0x3072,
	    "%s hdl=%x, portid=%02x%02x%02x done\n",
	    sp->name, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	complete(&lio->u.els_logo.comp);
}

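/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS command (LOGO).
 * @vha: host adapter the command is issued through
 * @els_opcode: ELS opcode to send
 * @remote_did: destination port ID
 *
 * Allocates a temporary fcport and SRB, builds the LOGO payload, starts
 * the SRB and waits for the exchange to complete.
 */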
int
qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
    port_id_t remote_did)
{
	srb_t *sp;
	fc_port_t *fcport = NULL;
	struct srb_iocb *elsio = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct els_logo_payload logo_pyld;
	int rval = QLA_SUCCESS;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
		return -ENOMEM;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		kfree(fcport);
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
		return -ENOMEM;
	}

	elsio = &sp->u.iocb_cmd;
	fcport->loop_id = 0xFFFF;
	fcport->d_id.b.domain = remote_did.b.domain;
	fcport->d_id.b.area = remote_did.b.area;
	fcport->d_id.b.al_pa = remote_did.b.al_pa;

	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	sp->type = SRB_ELS_DCMD;
	sp->name = "ELS_DCMD";
	sp->fcport = fcport;
	elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
	init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
	sp->done = qla2x00_els_dcmd_sp_done;
	sp->free = qla2x00_els_dcmd_sp_free;

	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
	    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
	    GFP_KERNEL);

	if (!elsio->u.els_logo.els_logo_pyld) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}

	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));

	elsio->u.els_logo.els_cmd = els_opcode;
	logo_pyld.opcode = els_opcode;
	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
	logo_pyld.s_id[1] = vha->d_id.b.area;
	logo_pyld.s_id[2] = vha->d_id.b.domain;
	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);

	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
	    sizeof(struct els_logo_payload));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_io, vha, 0x3074,
	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	wait_for_completion(&elsio->u.els_logo.comp);

	sp->free(sp);
	return rval;
}

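/**
 * qla24xx_els_logo_iocb() - Build the ELS IOCB for a driver-generated
 * PLOGI or LOGO command.
 * @sp: SRB carrying the ELS command
 * @els_iocb: IOCB packet to initialize
 */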
static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	scsi_qla_host_t *vha = sp->vha;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = 1;
	els_iocb->vp_index = vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = 0;
	els_iocb->opcode = elsio->u.els_logo.els_cmd;

	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->s_id[0] = vha->d_id.b.al_pa;
	els_iocb->s_id[1] = vha->d_id.b.area;
	els_iocb->s_id[2] = vha->d_id.b.domain;
	els_iocb->control_flags = 0;

	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
		els_iocb->tx_byte_count = els_iocb->tx_len =
		    sizeof(struct els_plogi_payload);
		els_iocb->tx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
		els_iocb->tx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));

		els_iocb->rx_dsd_count = 1;
		els_iocb->rx_byte_count = els_iocb->rx_len =
		    sizeof(struct els_plogi_payload);
		els_iocb->rx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
		els_iocb->rx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));

		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
		    "PLOGI ELS IOCB:\n");
		ql_dump_buffer(ql_log_info, vha, 0x0109,
		    (uint8_t *)els_iocb, 0x70);
	} else {
		els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
		els_iocb->tx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
		els_iocb->tx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));

		els_iocb->rx_byte_count = 0;
		els_iocb->rx_address[0] = 0;
		els_iocb->rx_address[1] = 0;
		els_iocb->rx_len = 0;
	}

	sp->vha->qla_stats.control_requests++;
}

static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	int res;

	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
	    sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);

	/* Abort the exchange */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	res = ha->isp_ops->abort_command(sp);
	ql_dbg(ql_dbg_io, vha, 0x3070,
	    "mbx abort_command %s\n",
	    (res == QLA_SUCCESS) ? "successful" : "failed");
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sp->done(sp, QLA_FUNCTION_TIMEOUT);
}

static void
qla2x00_els_dcmd2_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;
	struct event_arg ea;
	struct qla_work_evt *e;

	ql_dbg(ql_dbg_disc, vha, 0x3072,
	    "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
	    sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);

	fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	del_timer(&sp->u.iocb_cmd.timer);

	if (sp->flags & SRB_WAKEUP_ON_COMP)
		complete(&lio->u.els_plogi.comp);
	else {
		if (res) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		} else {
			memset(&ea, 0, sizeof(ea));
			ea.fcport = fcport;
			ea.rc = res;
			ea.event = FCME_ELS_PLOGI_DONE;
			qla2x00_fcport_event_handler(vha, &ea);
		}

		e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
		if (!e) {
			struct srb_iocb *elsio = &sp->u.iocb_cmd;

			if (elsio->u.els_plogi.els_plogi_pyld)
				dma_free_coherent(&sp->vha->hw->pdev->dev,
				    elsio->u.els_plogi.tx_size,
				    elsio->u.els_plogi.els_plogi_pyld,
				    elsio->u.els_plogi.els_plogi_pyld_dma);

			if (elsio->u.els_plogi.els_resp_pyld)
				dma_free_coherent(&sp->vha->hw->pdev->dev,
				    elsio->u.els_plogi.rx_size,
				    elsio->u.els_plogi.els_resp_pyld,
				    elsio->u.els_plogi.els_resp_pyld_dma);
			sp->free(sp);
			return;
		}
		e->u.iosb.sp = sp;
		qla2x00_post_work(vha, e);
	}
}

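/**
 * qla24xx_els_dcmd2_iocb() - Issue a driver-generated ELS PLOGI.
 * @vha: host adapter the command is issued through
 * @els_opcode: ELS opcode to send
 * @fcport: remote port the PLOGI is addressed to
 * @wait: if true, sleep until the exchange completes
 */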
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
    fc_port_t *fcport, bool wait)
{
	srb_t *sp;
	struct srb_iocb *elsio = NULL;
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_SUCCESS;
	void *ptr, *resp_ptr;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
		return -ENOMEM;
	}

	elsio = &sp->u.iocb_cmd;
	ql_dbg(ql_dbg_io, vha, 0x3073,
	    "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_ELS_DCMD;
	sp->name = "ELS_DCMD";
	sp->fcport = fcport;

	elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
	init_completion(&elsio->u.els_plogi.comp);
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);

	sp->done = qla2x00_els_dcmd2_sp_done;
	elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;

	ptr = elsio->u.els_plogi.els_plogi_pyld =
	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
		&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);

	if (!elsio->u.els_plogi.els_plogi_pyld) {
		rval = QLA_FUNCTION_FAILED;
		goto out;
	}

	resp_ptr = elsio->u.els_plogi.els_resp_pyld =
	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
		&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);

	if (!elsio->u.els_plogi.els_resp_pyld) {
		rval = QLA_FUNCTION_FAILED;
		goto out;
	}

	ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);

	memset(ptr, 0, sizeof(struct els_plogi_payload));
	memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
	memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
	    &ha->plogi_els_payld.data,
	    sizeof(elsio->u.els_plogi.els_plogi_pyld->data));

	elsio->u.els_plogi.els_cmd = els_opcode;
	elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;

	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x3074,
		    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
		    sp->name, sp->handle, fcport->loop_id,
		    fcport->d_id.b24, vha->d_id.b24);
	}

	if (wait) {
		wait_for_completion(&elsio->u.els_plogi.comp);

		if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
			rval = QLA_FUNCTION_FAILED;
	} else {
		goto done;
	}

out:
	fcport->flags &= ~(FCF_ASYNC_SENT);
	if (elsio->u.els_plogi.els_plogi_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev,
		    elsio->u.els_plogi.tx_size,
		    elsio->u.els_plogi.els_plogi_pyld,
		    elsio->u.els_plogi.els_plogi_pyld_dma);

	if (elsio->u.els_plogi.els_resp_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev,
		    elsio->u.els_plogi.rx_size,
		    elsio->u.els_plogi.els_resp_pyld,
		    elsio->u.els_plogi.els_resp_pyld_dma);

	sp->free(sp);
done:
	return rval;
}

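/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB for a BSG request.
 * @sp: SRB carrying the BSG job
 * @els_iocb: IOCB packet to initialize
 */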
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_request->rqst_data.r_els.els_code :
	    bsg_request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));

	sp->vha->qla_stats.control_requests++;
}

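/**
 * qla2x00_ct_iocb() - Build a CT pass-through IOCB for legacy ISPs.
 * @sp: SRB carrying the BSG job
 * @ct_iocb: MS IOCB entry to initialize
 */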
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = cpu_to_le16(0);
	ct_iocb->control_flags = cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;

	sp->vha->qla_stats.control_requests++;
}

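/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB for FWI2-capable ISPs.
 * @sp: SRB carrying the BSG job
 * @ct_iocb: CT IOCB entry to initialize
 */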
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t cmd_dsds, rsp_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int entry_count = 1;
	cont_a64_entry_t *cont_pkt = NULL;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->vha->vp_idx;
	ct_iocb->comp_status = cpu_to_le16(0);

	cmd_dsds = bsg_job->request_payload.sg_cnt;
	rsp_dsds = bsg_job->reply_payload.sg_cnt;

	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	avail_dsds = 2;
	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(
			    vha, ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	index = 0;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
				&reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify the response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify the response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

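/**
 * qla24xx_abort_iocb() - Build an Abort IOCB for an outstanding command.
 * @sp: SRB carrying the abort request
 * @abt_iocb: abort IOCB entry to initialize
 */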
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req = sp->qpair->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	if (sp->fcport) {
		abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
		abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
		abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	}
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
				    aio->u.abt.cmd_hndl));
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
	/* Send the command to the firmware */
	wmb();
}

static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
	int i, sz;

	mbx->entry_type = MBX_IOCB_TYPE;
	mbx->handle = sp->handle;
	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

	for (i = 0; i < sz; i++)
		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
}

static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
	ct_pkt->handle = sp->handle;
}

static void qla2x00_send_notify_ack_iocb(srb_t *sp,
	struct nack_to_isp *nack)
{
	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

	nack->entry_type = NOTIFY_ACK_TYPE;
	nack->entry_count = 1;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = sp->handle;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = 0;
	nack->u.isp24.srr_reject_code = 0;
	nack->u.isp24.srr_reject_code_expl = 0;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}

/*
 * Build NVME LS request
 */
static int
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
	struct srb_iocb *nvme;
	int rval = QLA_SUCCESS;

	nvme = &sp->u.iocb_cmd;
	cmd_pkt->entry_type = PT_LS4_REQUEST;
	cmd_pkt->entry_count = 1;
	cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;

	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	cmd_pkt->tx_dseg_count = 1;
	cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
	cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
	cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
	cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));

	cmd_pkt->rx_dseg_count = 1;
	cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
	cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
	cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
	cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));

	return rval;
}

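/**
 * qla25xx_ctrlvp_iocb() - Build a VP Control IOCB.
 * @sp: SRB carrying the VP control request
 * @vce: VP control IOCB entry to initialize
 */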
static void
qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
{
	int map, pos;

	vce->entry_type = VP_CTRL_IOCB_TYPE;
	vce->handle = sp->handle;
	vce->entry_count = 1;
	vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
	vce->vp_count = cpu_to_le16(1);

	/*
	 * The index map in firmware starts with 1; decrement the index.
	 * This is ok as we never use index 0.
	 */
	map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
	pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
	vce->vp_idx_map[map] |= 1 << pos;
}

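/**
 * qla24xx_prlo_iocb() - Build a PRLO Login/Logout Port IOCB.
 * @sp: SRB carrying the PRLO request
 * @logio: IOCB packet to initialize
 */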
static void
qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

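/**
 * qla2x00_start_sp() - Allocate an IOCB for @sp, build it based on the
 * SRB type and notify the firmware.
 * @sp: SRB to send to the ISP
 *
 * Returns QLA_SUCCESS on success, else a failure code.
 */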
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qp = sp->qpair;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
	if (!pkt) {
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_PRLI_CMD:
		qla24xx_prli_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_NVME_LS:
		qla_nvme_ls(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	case SRB_CTRL_VP:
		qla25xx_ctrlvp_iocb(sp, pkt);
		break;
	case SRB_PRLO_CMD:
		qla24xx_prlo_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(vha, qp->req);
done:
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
	return rval;
}

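/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional Command IOCB from a
 * BSG request.
 * @sp: SRB carrying the BSG job
 * @vha: host adapter the command is issued through
 * @cmd_pkt: bidirectional command IOCB to initialize
 * @tot_dsds: total number of data segment descriptors
 */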
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
	struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction; in this case both flags are set.
	 * Also set the BD_WRAP_BACK flag; the firmware will take care of
	 * assigning DID=SID for outgoing packets.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available for a bidirectional IOCB; the remaining
	 * DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * five DSDs
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* For read requests the DSDs always go to the continuation IOCB
	 * and follow the write DSDs. If there is room on the current IOCB
	 * then it is added to that IOCB, else a new continuation IOCB is
	 * allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * five DSDs
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should be same as number of IOCB required for this cmd */
	cmd_pkt->entry_count = entry_count;
}

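/**
 * qla2x00_start_bidir() - Send a bidirectional command to the ISP.
 * @sp: command to send to the ISP
 * @vha: host adapter the command is issued through
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, else an EXT_STATUS_* failure code.
 */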
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCB required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);