1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
13 #include <scsi/scsi_tcq.h>
15 /**
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
17 * @sp: SCSI command
18 *
19 * Returns the proper CF_* direction based on CDB.
20 */
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
23 {
24 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
26 struct scsi_qla_host *vha = sp->vha;
28 cflags = 0;
30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE;
33 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
34 vha->qla_stats.output_requests++;
35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ;
37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38 vha->qla_stats.input_requests++;
39 }
40 return (cflags);
41 }
43 /**
44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45 * Continuation Type 0 IOCBs to allocate.
47 * @dsds: number of data segment descriptors needed
49 * Returns the number of IOCB entries needed to store @dsds.
50 */
51 uint16_t
52 qla2x00_calc_iocbs_32(uint16_t dsds)
54 uint16_t iocbs;
56 iocbs = 1;
57 if (dsds > 3) {
58 iocbs += (dsds - 3) / 7;
59 if ((dsds - 3) % 7)
60 iocbs++;
61 }
62 return (iocbs);
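/*
 * A worked example of the Type 2 sizing math above (illustrative
 * values, not from the firmware spec): the command IOCB itself holds
 * 3 DSDs and each Continuation Type 0 IOCB holds 7 more, so
 *   dsds = 3  -> 1 IOCB
 *   dsds = 10 -> 1 + (10 - 3) / 7 = 2 IOCBs (the 7 extra fit exactly)
 *   dsds = 11 -> 1 + (11 - 3) / 7, plus 1 for the remainder = 3 IOCBs
 */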
65 /**
66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67 * Continuation Type 1 IOCBs to allocate.
69 * @dsds: number of data segment descriptors needed
71 * Returns the number of IOCB entries needed to store @dsds.
72 */
73 uint16_t
74 qla2x00_calc_iocbs_64(uint16_t dsds)
76 uint16_t iocbs;
78 iocbs = 1;
79 if (dsds > 2) {
80 iocbs += (dsds - 2) / 5;
81 if ((dsds - 2) % 5)
82 iocbs++;
83 }
84 return (iocbs);
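/*
 * Same arithmetic for the 64-bit variant (illustrative values): the
 * command IOCB holds 2 DSDs and each Continuation Type 1 IOCB holds
 * 5 more, so
 *   dsds = 2 -> 1 IOCB
 *   dsds = 7 -> 1 + (7 - 2) / 5 = 2 IOCBs
 *   dsds = 8 -> 1 + (8 - 2) / 5, plus 1 for the remainder = 3 IOCBs
 */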
88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
91 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96 cont_entry_t *cont_pkt;
97 struct req_que *req = vha->req;
98 /* Adjust ring index. */
99 req->ring_index++;
100 if (req->ring_index == req->length) {
101 req->ring_index = 0;
102 req->ring_ptr = req->ring;
103 } else {
104 req->ring_ptr++;
105 }
107 cont_pkt = (cont_entry_t *)req->ring_ptr;
109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
112 return (cont_pkt);
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
118 * @req: request queue
120 * Returns a pointer to the continuation type 1 IOCB packet.
122 static inline cont_a64_entry_t *
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
125 cont_a64_entry_t *cont_pkt;
127 /* Adjust ring index. */
128 req->ring_index++;
129 if (req->ring_index == req->length) {
130 req->ring_index = 0;
131 req->ring_ptr = req->ring;
132 } else {
133 req->ring_ptr++;
134 }
136 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
138 /* Load packet defaults. */
139 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
140 cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
141 cpu_to_le32(CONTINUE_A64_TYPE);
143 return (cont_pkt);
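/*
 * Ring-wrap arithmetic used by both prep helpers above, as a sketch
 * with an assumed queue depth (2048 is an example value, not taken
 * from this driver): advancing from ring_index 2046 to 2047 just
 * bumps ring_ptr; the next advance hits req->length, so ring_index
 * wraps to 0 and ring_ptr is reset to the base of the ring.
 */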
147 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
149 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
150 uint8_t guard = scsi_host_get_guard(cmd->device->host);
152 /* We always use DIF bundling for best performance. */
153 *fw_prot_opts = 0;
155 /* Translate SCSI opcode to a protection opcode */
156 switch (scsi_get_prot_op(cmd)) {
157 case SCSI_PROT_READ_STRIP:
158 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
159 break;
160 case SCSI_PROT_WRITE_INSERT:
161 *fw_prot_opts |= PO_MODE_DIF_INSERT;
162 break;
163 case SCSI_PROT_READ_INSERT:
164 *fw_prot_opts |= PO_MODE_DIF_INSERT;
165 break;
166 case SCSI_PROT_WRITE_STRIP:
167 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
168 break;
169 case SCSI_PROT_READ_PASS:
170 case SCSI_PROT_WRITE_PASS:
171 if (guard & SHOST_DIX_GUARD_IP)
172 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
173 else
174 *fw_prot_opts |= PO_MODE_DIF_PASS;
175 break;
176 default: /* Normal Request */
177 *fw_prot_opts |= PO_MODE_DIF_PASS;
178 break;
179 }
181 return scsi_prot_sg_count(cmd);
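/*
 * Typical call pattern, as a sketch mirroring qla24xx_dif_start_scsi()
 * later in this file: the returned protection-segment count decides
 * whether the protection scatterlist is DMA-mapped at all.
 *
 *   uint16_t fw_prot_opts = 0;
 *
 *   if (qla24xx_configure_prot_mode(sp, &fw_prot_opts))
 *       nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
 *           scsi_prot_sg_count(cmd), cmd->sc_data_direction);
 */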
185 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
186 * capable IOCB types.
188 * @sp: SRB command to process
189 * @cmd_pkt: Command type 2 IOCB
190 * @tot_dsds: Total number of segments to transfer
192 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
197 scsi_qla_host_t *vha;
198 struct scsi_cmnd *cmd;
199 struct scatterlist *sg;
202 cmd = GET_CMD_SP(sp);
204 /* Update entry type to indicate Command Type 2 IOCB */
205 *((uint32_t *)(&cmd_pkt->entry_type)) =
206 cpu_to_le32(COMMAND_TYPE);
208 /* No data transfer */
209 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
210 cmd_pkt->byte_count = cpu_to_le32(0);
211 return;
212 }
214 vha = sp->vha;
215 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
217 /* Three DSDs are available in the Command Type 2 IOCB */
218 avail_dsds = 3;
219 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
221 /* Load data segments */
222 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
223 cont_entry_t *cont_pkt;
225 /* Allocate additional continuation packets? */
226 if (avail_dsds == 0) {
227 /*
228 * Seven DSDs are available in the Continuation
229 * Type 0 IOCB.
230 */
231 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
232 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
233 avail_dsds = 7;
234 }
236 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
237 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
238 avail_dsds--;
239 }
243 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
244 * capable IOCB types.
246 * @sp: SRB command to process
247 * @cmd_pkt: Command type 3 IOCB
248 * @tot_dsds: Total number of segments to transfer
250 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
255 scsi_qla_host_t *vha;
256 struct scsi_cmnd *cmd;
257 struct scatterlist *sg;
260 cmd = GET_CMD_SP(sp);
262 /* Update entry type to indicate Command Type 3 IOCB */
263 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
265 /* No data transfer */
266 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
267 cmd_pkt->byte_count = cpu_to_le32(0);
268 return;
269 }
271 vha = sp->vha;
272 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
274 /* Two DSDs are available in the Command Type 3 IOCB */
275 avail_dsds = 2;
276 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
278 /* Load data segments */
279 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
281 cont_a64_entry_t *cont_pkt;
283 /* Allocate additional continuation packets? */
284 if (avail_dsds == 0) {
285 /*
286 * Five DSDs are available in the Continuation
287 * Type 1 IOCB.
288 */
289 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
290 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
291 avail_dsds = 5;
292 }
294 sle_dma = sg_dma_address(sg);
295 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
296 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
297 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
298 avail_dsds--;
299 }
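/*
 * Layout of one 64-bit DSD emitted by the loop above (illustrative
 * values): three little-endian words per segment. A segment at DMA
 * address 0x123456000 with length 0x1000 would be written as
 *   cpu_to_le32(0x23456000)  - address bits 31:0  (LSD)
 *   cpu_to_le32(0x00000001)  - address bits 63:32 (MSD)
 *   cpu_to_le32(0x00001000)  - segment byte count
 */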
303 * qla2x00_start_scsi() - Send a SCSI command to the ISP
304 * @sp: command to send to the ISP
306 * Returns non-zero if a failure occurred, else zero.
309 qla2x00_start_scsi(srb_t *sp)
313 scsi_qla_host_t *vha;
314 struct scsi_cmnd *cmd;
318 cmd_entry_t *cmd_pkt;
322 struct device_reg_2xxx __iomem *reg;
323 struct qla_hw_data *ha;
327 /* Setup device pointers. */
330 reg = &ha->iobase->isp;
331 cmd = GET_CMD_SP(sp);
332 req = ha->req_q_map[0];
333 rsp = ha->rsp_q_map[0];
334 /* So we know we haven't pci_map'ed anything yet */
337 /* Send marker if required */
338 if (vha->marker_needed != 0) {
339 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
340 QLA_SUCCESS)
341 return (QLA_FUNCTION_FAILED);
343 vha->marker_needed = 0;
346 /* Acquire ring specific lock */
347 spin_lock_irqsave(&ha->hardware_lock, flags);
349 /* Check for room in outstanding command list. */
350 handle = req->current_outstanding_cmd;
351 for (index = 1; index < req->num_outstanding_cmds; index++) {
352 handle++;
353 if (handle == req->num_outstanding_cmds)
354 handle = 1;
355 if (!req->outstanding_cmds[handle])
356 break;
357 }
358 if (index == req->num_outstanding_cmds)
359 goto queuing_error;
361 /* Map the sg table so we have an accurate count of sg entries needed */
362 if (scsi_sg_count(cmd)) {
363 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
364 scsi_sg_count(cmd), cmd->sc_data_direction);
365 if (unlikely(!nseg))
366 goto queuing_error;
367 } else
368 nseg = 0;
370 tot_dsds = nseg;
372 /* Calculate the number of request entries needed. */
373 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
374 if (req->cnt < (req_cnt + 2)) {
375 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
376 if (req->ring_index < cnt)
377 req->cnt = cnt - req->ring_index;
378 else
379 req->cnt = req->length -
380 (req->ring_index - cnt);
381 /* If still no head room then bail out */
382 if (req->cnt < (req_cnt + 2))
383 goto queuing_error;
384 }
386 /* Build command packet */
387 req->current_outstanding_cmd = handle;
388 req->outstanding_cmds[handle] = sp;
389 sp->handle = handle;
390 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
391 req->cnt -= req_cnt;
393 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
394 cmd_pkt->handle = handle;
395 /* Zero out remaining portion of packet. */
396 clr_ptr = (uint32_t *)cmd_pkt + 2;
397 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
398 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
400 /* Set target ID and LUN number*/
401 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
402 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
403 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
405 /* Load SCSI command packet. */
406 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
407 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
409 /* Build IOCB segments */
410 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
412 /* Set total data segment count. */
413 cmd_pkt->entry_count = (uint8_t)req_cnt;
414 wmb();
416 /* Adjust ring index. */
417 req->ring_index++;
418 if (req->ring_index == req->length) {
419 req->ring_index = 0;
420 req->ring_ptr = req->ring;
421 } else
422 req->ring_ptr++;
424 sp->flags |= SRB_DMA_VALID;
426 /* Set chip new ring index. */
427 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
428 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
430 /* Manage unprocessed RIO/ZIO commands in response queue. */
431 if (vha->flags.process_response_queue &&
432 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
433 qla2x00_process_response_queue(rsp);
435 spin_unlock_irqrestore(&ha->hardware_lock, flags);
436 return (QLA_SUCCESS);
438 queuing_error:
439 if (tot_dsds)
440 scsi_dma_unmap(cmd);
442 spin_unlock_irqrestore(&ha->hardware_lock, flags);
444 return (QLA_FUNCTION_FAILED);
448 * qla2x00_start_iocbs() - Execute the IOCB command
450 * @req: request queue
453 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
455 struct qla_hw_data *ha = vha->hw;
456 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
458 if (IS_P3P_TYPE(ha)) {
459 qla82xx_start_iocbs(vha);
460 } else {
461 /* Adjust ring index. */
462 req->ring_index++;
463 if (req->ring_index == req->length) {
464 req->ring_index = 0;
465 req->ring_ptr = req->ring;
466 } else
467 req->ring_ptr++;
469 /* Set chip new ring index. */
470 if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
471 WRT_REG_DWORD(req->req_q_in, req->ring_index);
472 } else if (IS_QLA83XX(ha)) {
473 WRT_REG_DWORD(req->req_q_in, req->ring_index);
474 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
475 } else if (IS_QLAFX00(ha)) {
476 WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
477 RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
478 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
479 } else if (IS_FWI2_CAPABLE(ha)) {
480 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
481 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
482 } else {
483 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
484 req->ring_index);
485 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
486 }
487 }
491 * qla2x00_marker() - Send a marker IOCB to the firmware.
493 * @qpair: queue pair pointer
496 * @type: marker modifier
498 * Can be called from both normal and interrupt context.
500 * Returns non-zero if a failure occurred, else zero.
503 __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
504 uint16_t loop_id, uint64_t lun, uint8_t type)
506 mrk_entry_t *mrk;
507 struct mrk_entry_24xx *mrk24 = NULL;
508 struct req_que *req = qpair->req;
509 struct qla_hw_data *ha = vha->hw;
510 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
512 mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
513 if (mrk == NULL) {
514 ql_log(ql_log_warn, base_vha, 0x3026,
515 "Failed to allocate Marker IOCB.\n");
517 return (QLA_FUNCTION_FAILED);
520 mrk->entry_type = MARKER_TYPE;
521 mrk->modifier = type;
522 if (type != MK_SYNC_ALL) {
523 if (IS_FWI2_CAPABLE(ha)) {
524 mrk24 = (struct mrk_entry_24xx *) mrk;
525 mrk24->nport_handle = cpu_to_le16(loop_id);
526 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
527 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
528 mrk24->vp_index = vha->vp_idx;
529 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
530 } else {
531 SET_TARGET_ID(ha, mrk->target, loop_id);
532 mrk->lun = cpu_to_le16((uint16_t)lun);
533 }
534 }
536 wmb();
537 qla2x00_start_iocbs(vha, req);
539 return (QLA_SUCCESS);
543 qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
544 uint16_t loop_id, uint64_t lun, uint8_t type)
546 int ret;
547 unsigned long flags = 0;
549 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
550 ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
551 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
553 return (ret);
557 * qla2x00_issue_marker
560 * Caller CAN have hardware lock held as specified by ha_locked parameter.
561 * Might release it, then reacquire.
563 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
565 if (ha_locked) {
566 if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
567 MK_SYNC_ALL) != QLA_SUCCESS)
568 return QLA_FUNCTION_FAILED;
569 } else {
570 if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
571 MK_SYNC_ALL) != QLA_SUCCESS)
572 return QLA_FUNCTION_FAILED;
573 }
574 vha->marker_needed = 0;
576 return QLA_SUCCESS;
580 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
581 uint16_t tot_dsds)
583 uint32_t *cur_dsd = NULL;
584 scsi_qla_host_t *vha;
585 struct qla_hw_data *ha;
586 struct scsi_cmnd *cmd;
587 struct scatterlist *cur_seg;
591 uint8_t first_iocb = 1;
592 uint32_t dsd_list_len;
593 struct dsd_dma *dsd_ptr;
596 cmd = GET_CMD_SP(sp);
598 /* Update entry type to indicate Command Type 6 IOCB */
599 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
601 /* No data transfer */
602 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
603 cmd_pkt->byte_count = cpu_to_le32(0);
604 return 0;
605 }
607 vha = sp->vha;
608 ha = vha->hw;
610 /* Set transfer direction */
611 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
612 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
613 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
614 vha->qla_stats.output_requests++;
615 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
616 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
617 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
618 vha->qla_stats.input_requests++;
621 cur_seg = scsi_sglist(cmd);
622 ctx = GET_CMD_CTX_SP(sp);
624 while (tot_dsds) {
625 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
626 QLA_DSDS_PER_IOCB : tot_dsds;
627 tot_dsds -= avail_dsds;
628 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
630 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
631 struct dsd_dma, list);
632 next_dsd = dsd_ptr->dsd_addr;
633 list_del(&dsd_ptr->list);
635 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
639 if (first_iocb) {
640 first_iocb = 0;
641 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
642 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
643 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
644 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
645 } else {
646 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
647 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
648 *cur_dsd++ = cpu_to_le32(dsd_list_len);
650 cur_dsd = (uint32_t *)next_dsd;
651 while (avail_dsds) {
652 dma_addr_t sle_dma;
654 sle_dma = sg_dma_address(cur_seg);
655 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
656 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
657 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
658 cur_seg = sg_next(cur_seg);
659 avail_dsds--;
660 }
661 }
663 /* Null termination */
664 *cur_dsd++ = 0;
665 *cur_dsd++ = 0;
666 *cur_dsd++ = 0;
667 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
668 return 0;
672 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
673 * for Command Type 6.
675 * @dsds: number of data segment descriptors needed
677 * Returns the number of DSD lists needed to store @dsds.
679 static inline uint16_t
680 qla24xx_calc_dsd_lists(uint16_t dsds)
682 uint16_t dsd_lists = 0;
684 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
685 if (dsds % QLA_DSDS_PER_IOCB)
686 dsd_lists++;
687 return dsd_lists;
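/*
 * Worked example (assuming QLA_DSDS_PER_IOCB is 37, as defined
 * elsewhere in this driver): dsds = 37 needs a single list, while
 * dsds = 40 needs 40 / 37 = 1 list plus one more for the remainder
 * of 3, i.e. 2 lists.
 */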
692 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
693 * IOCB types.
695 * @sp: SRB command to process
696 * @cmd_pkt: Command type 7 IOCB
697 * @tot_dsds: Total number of segments to transfer
698 * @req: pointer to request queue
701 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
702 uint16_t tot_dsds, struct req_que *req)
706 scsi_qla_host_t *vha;
707 struct scsi_cmnd *cmd;
708 struct scatterlist *sg;
711 cmd = GET_CMD_SP(sp);
713 /* Update entry type to indicate Command Type 7 IOCB */
714 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
716 /* No data transfer */
717 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
718 cmd_pkt->byte_count = cpu_to_le32(0);
719 return;
720 }
722 vha = sp->vha;
724 /* Set transfer direction */
725 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
726 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
727 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
728 vha->qla_stats.output_requests++;
729 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
730 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
731 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
732 vha->qla_stats.input_requests++;
735 /* One DSD is available in the Command Type 7 IOCB */
736 avail_dsds = 1;
737 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
739 /* Load data segments */
741 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
743 cont_a64_entry_t *cont_pkt;
745 /* Allocate additional continuation packets? */
746 if (avail_dsds == 0) {
747 /*
748 * Five DSDs are available in the Continuation
749 * Type 1 IOCB.
750 */
751 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
752 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
753 avail_dsds = 5;
754 }
756 sle_dma = sg_dma_address(sg);
757 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
758 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
759 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
760 avail_dsds--;
761 }
764 struct fw_dif_context {
765 uint32_t ref_tag;
766 uint16_t app_tag;
767 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
768 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
772 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
776 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
777 unsigned int protcnt)
779 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
781 switch (scsi_get_prot_type(cmd)) {
782 case SCSI_PROT_DIF_TYPE0:
784 * No check for ql2xenablehba_err_chk, as it would be an
785 * I/O error if hba tag generation is not done.
787 pkt->ref_tag = cpu_to_le32((uint32_t)
788 (0xffffffff & scsi_get_lba(cmd)));
790 if (!qla2x00_hba_err_chk_enabled(sp))
791 break;
793 pkt->ref_tag_mask[0] = 0xff;
794 pkt->ref_tag_mask[1] = 0xff;
795 pkt->ref_tag_mask[2] = 0xff;
796 pkt->ref_tag_mask[3] = 0xff;
797 break;
799 /*
800 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
801 * match LBA in CDB + N
802 */
803 case SCSI_PROT_DIF_TYPE2:
804 pkt->app_tag = cpu_to_le16(0);
805 pkt->app_tag_mask[0] = 0x0;
806 pkt->app_tag_mask[1] = 0x0;
808 pkt->ref_tag = cpu_to_le32((uint32_t)
809 (0xffffffff & scsi_get_lba(cmd)));
811 if (!qla2x00_hba_err_chk_enabled(sp))
812 break;
814 /* enable ALL bytes of the ref tag */
815 pkt->ref_tag_mask[0] = 0xff;
816 pkt->ref_tag_mask[1] = 0xff;
817 pkt->ref_tag_mask[2] = 0xff;
818 pkt->ref_tag_mask[3] = 0xff;
819 break;
821 /* For Type 3 protection: 16 bit GUARD only */
822 case SCSI_PROT_DIF_TYPE3:
823 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
824 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
825 0x00;
826 break;
828 /*
829 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
830 * 16 bit app tag.
831 */
832 case SCSI_PROT_DIF_TYPE1:
833 pkt->ref_tag = cpu_to_le32((uint32_t)
834 (0xffffffff & scsi_get_lba(cmd)));
835 pkt->app_tag = cpu_to_le16(0);
836 pkt->app_tag_mask[0] = 0x0;
837 pkt->app_tag_mask[1] = 0x0;
839 if (!qla2x00_hba_err_chk_enabled(sp))
840 break;
842 /* enable ALL bytes of the ref tag */
843 pkt->ref_tag_mask[0] = 0xff;
844 pkt->ref_tag_mask[1] = 0xff;
845 pkt->ref_tag_mask[2] = 0xff;
846 pkt->ref_tag_mask[3] = 0xff;
847 break;
848 }
852 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
855 struct scatterlist *sg;
856 uint32_t cumulative_partial, sg_len;
857 dma_addr_t sg_dma_addr;
859 if (sgx->num_bytes == sgx->tot_bytes)
860 return 0;
862 sg = sgx->cur_sg;
863 cumulative_partial = sgx->tot_partial;
865 sg_dma_addr = sg_dma_address(sg);
866 sg_len = sg_dma_len(sg);
868 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
870 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
871 sgx->dma_len = (blk_sz - cumulative_partial);
872 sgx->tot_partial = 0;
873 sgx->num_bytes += blk_sz;
874 *partial = 0;
875 } else {
876 sgx->dma_len = sg_len - sgx->bytes_consumed;
877 sgx->tot_partial += sgx->dma_len;
878 *partial = 1;
879 }
881 sgx->bytes_consumed += sgx->dma_len;
883 if (sg_len == sgx->bytes_consumed) {
884 sg = sg_next(sg);
885 sgx->num_sg++;
886 sgx->cur_sg = sg;
887 sgx->bytes_consumed = 0;
888 }
890 return 1;
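/*
 * Walk-through of the splitter above (example numbers only), with
 * blk_sz = 512: an SG element of 1536 bytes yields three calls, each
 * returning a 512-byte chunk (partial == 0). A 700-byte element
 * yields one full 512-byte chunk, then a 188-byte chunk flagged
 * partial == 1; the 188 bytes are carried in sgx->tot_partial and
 * completed out of the next SG element.
 */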
894 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
895 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
898 uint8_t avail_dsds = 0;
899 uint32_t dsd_list_len;
900 struct dsd_dma *dsd_ptr;
901 struct scatterlist *sg_prot;
902 uint32_t *cur_dsd = dsd;
903 uint16_t used_dsds = tot_dsds;
904 uint32_t prot_int; /* protection interval */
908 uint32_t sle_dma_len, tot_prot_dma_len = 0;
909 struct scsi_cmnd *cmd;
911 memset(&sgx, 0, sizeof(struct qla2_sgx));
912 if (sp) {
913 cmd = GET_CMD_SP(sp);
914 prot_int = cmd->device->sector_size;
916 sgx.tot_bytes = scsi_bufflen(cmd);
917 sgx.cur_sg = scsi_sglist(cmd);
920 sg_prot = scsi_prot_sglist(cmd);
921 } else if (tc) {
922 prot_int = tc->blk_sz;
923 sgx.tot_bytes = tc->bufflen;
924 sgx.cur_sg = tc->sg;
925 sg_prot = tc->prot_sg;
926 } else {
927 BUG();
928 return 1;
929 }
931 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
933 sle_dma = sgx.dma_addr;
934 sle_dma_len = sgx.dma_len;
936 /* Allocate additional continuation packets? */
937 if (avail_dsds == 0) {
938 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
939 QLA_DSDS_PER_IOCB : used_dsds;
940 dsd_list_len = (avail_dsds + 1) * 12;
941 used_dsds -= avail_dsds;
943 /* allocate tracking DS */
944 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
945 if (!dsd_ptr)
946 return 1;
948 /* allocate new list */
949 dsd_ptr->dsd_addr = next_dsd =
950 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
951 &dsd_ptr->dsd_list_dma);
953 if (!next_dsd) {
954 /*
955 * Need to cleanup only this dsd_ptr, rest
956 * will be done by sp_free_dma()
957 */
958 kfree(dsd_ptr);
959 return 1;
960 }
962 if (sp) {
963 list_add_tail(&dsd_ptr->list,
964 &((struct crc_context *)
965 sp->u.scmd.ctx)->dsd_list);
967 sp->flags |= SRB_CRC_CTX_DSD_VALID;
968 } else {
969 list_add_tail(&dsd_ptr->list,
970 &(tc->ctx->dsd_list));
971 *tc->ctx_dsd_alloced = 1;
975 /* add new list to cmd iocb or last list */
976 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
977 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
978 *cur_dsd++ = dsd_list_len;
979 cur_dsd = (uint32_t *)next_dsd;
981 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
982 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
983 *cur_dsd++ = cpu_to_le32(sle_dma_len);
984 avail_dsds--;
986 if (partial == 0) {
987 /* Got a full protection interval */
988 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
989 sle_dma_len = 8;
991 tot_prot_dma_len += sle_dma_len;
992 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
993 tot_prot_dma_len = 0;
994 sg_prot = sg_next(sg_prot);
997 partial = 1; /* So as to not re-enter this block */
1001 /* Null termination */
1002 *cur_dsd++ = 0;
1003 *cur_dsd++ = 0;
1004 *cur_dsd++ = 0;
1009 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1010 uint16_t tot_dsds, struct qla_tc_param *tc)
1013 uint8_t avail_dsds = 0;
1014 uint32_t dsd_list_len;
1015 struct dsd_dma *dsd_ptr;
1016 struct scatterlist *sg, *sgl;
1017 uint32_t *cur_dsd = dsd;
1019 uint16_t used_dsds = tot_dsds;
1020 struct scsi_cmnd *cmd;
1023 cmd = GET_CMD_SP(sp);
1024 sgl = scsi_sglist(cmd);
1033 for_each_sg(sgl, sg, tot_dsds, i) {
1036 /* Allocate additional continuation packets? */
1037 if (avail_dsds == 0) {
1038 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1039 QLA_DSDS_PER_IOCB : used_dsds;
1040 dsd_list_len = (avail_dsds + 1) * 12;
1041 used_dsds -= avail_dsds;
1043 /* allocate tracking DS */
1044 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1045 if (!dsd_ptr)
1046 return 1;
1048 /* allocate new list */
1049 dsd_ptr->dsd_addr = next_dsd =
1050 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1051 &dsd_ptr->dsd_list_dma);
1053 if (!next_dsd) {
1054 /*
1055 * Need to cleanup only this dsd_ptr, rest
1056 * will be done by sp_free_dma()
1057 */
1058 kfree(dsd_ptr);
1059 return 1;
1060 }
1062 if (sp) {
1063 list_add_tail(&dsd_ptr->list,
1064 &((struct crc_context *)
1065 sp->u.scmd.ctx)->dsd_list);
1067 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1068 } else {
1069 list_add_tail(&dsd_ptr->list,
1070 &(tc->ctx->dsd_list));
1071 *tc->ctx_dsd_alloced = 1;
1074 /* add new list to cmd iocb or last list */
1075 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1076 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1077 *cur_dsd++ = dsd_list_len;
1078 cur_dsd = (uint32_t *)next_dsd;
1080 sle_dma = sg_dma_address(sg);
1082 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1083 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1084 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1088 /* Null termination */
1089 *cur_dsd++ = 0;
1090 *cur_dsd++ = 0;
1091 *cur_dsd++ = 0;
1096 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1097 uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1099 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1100 struct scatterlist *sg, *sgl;
1101 struct crc_context *difctx = NULL;
1102 struct scsi_qla_host *vha;
1104 uint avail_dsds = 0;
1105 uint used_dsds = tot_dsds;
1106 bool dif_local_dma_alloc = false;
1107 bool direction_to_device = false;
1110 if (sp) {
1111 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1112 sgl = scsi_prot_sglist(cmd);
1113 vha = sp->vha;
1114 difctx = sp->u.scmd.ctx;
1115 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1116 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1117 "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1118 __func__, cmd, difctx, sp);
1119 } else if (tc) {
1120 vha = tc->vha;
1121 sgl = tc->prot_sg;
1122 difctx = tc->ctx;
1123 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1129 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1130 "%s: enter (write=%u)\n", __func__, direction_to_device);
1132 /* if initiator doing write or target doing read */
1133 if (direction_to_device) {
1134 for_each_sg(sgl, sg, tot_dsds, i) {
1135 u64 sle_phys = sg_phys(sg);
1137 /* If SGE addr + len flips bits in upper 32-bits */
1138 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1139 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1140 "%s: page boundary crossing (phys=%llx len=%x)\n",
1141 __func__, sle_phys, sg->length);
1143 if (difctx) {
1144 ha->dif_bundle_crossed_pages++;
1145 dif_local_dma_alloc = true;
1146 } else {
1147 ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1149 "%s: difctx pointer is NULL\n",
1155 ha->dif_bundle_writes++;
1157 ha->dif_bundle_reads++;
1160 if (ql2xdifbundlinginternalbuffers)
1161 dif_local_dma_alloc = direction_to_device;
1163 if (dif_local_dma_alloc) {
1164 u32 track_difbundl_buf = 0;
1165 u32 ldma_sg_len = 0;
1168 difctx->no_dif_bundl = 0;
1169 difctx->dif_bundl_len = 0;
1171 /* Track DSD buffers */
1172 INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1173 /* Track local DMA buffers */
1174 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1176 for_each_sg(sgl, sg, tot_dsds, i) {
1177 u32 sglen = sg_dma_len(sg);
1179 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1180 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1181 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1182 difctx->dif_bundl_len, ldma_needed);
1184 while (sglen) {
1185 u32 xfrlen = 0;
1187 if (ldma_needed) {
1188 /*
1189 * Allocate list item to store
1190 * the DMA buffers
1191 */
1192 dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1193 GFP_ATOMIC);
1194 if (!dsd_ptr) {
1195 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1196 "%s: failed alloc dsd_ptr\n",
1200 ha->dif_bundle_kallocs++;
1202 /* allocate dma buffer */
1203 dsd_ptr->dsd_addr = dma_pool_alloc
1204 (ha->dif_bundl_pool, GFP_ATOMIC,
1205 &dsd_ptr->dsd_list_dma);
1206 if (!dsd_ptr->dsd_addr) {
1207 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1208 "%s: failed alloc ->dsd_ptr\n",
1210 /*
1211 * need to cleanup only this
1212 * dsd_ptr rest will be done
1213 * by sp_free_dma()
1214 */
1215 kfree(dsd_ptr);
1216 ha->dif_bundle_kallocs--;
1217 return 1;
1218 }
1219 ha->dif_bundle_dma_allocs++;
1221 difctx->no_dif_bundl++;
1222 list_add_tail(&dsd_ptr->list,
1223 &difctx->ldif_dma_hndl_list);
1226 /* xfrlen is min of dma pool size and sglen */
1227 xfrlen = (sglen >
1228 (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1229 DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1230 sglen;
1232 /* replace with local allocated dma buffer */
1233 sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1234 dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1235 difctx->dif_bundl_len);
1236 difctx->dif_bundl_len += xfrlen;
1237 sglen -= xfrlen;
1238 ldma_sg_len += xfrlen;
1239 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1240 sg_is_last(sg)) {
1241 ldma_needed = 1;
1242 ldma_sg_len = 0;
1243 }
1244 }
1245 }
1247 track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1248 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1249 "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1250 difctx->dif_bundl_len, difctx->no_dif_bundl,
1251 track_difbundl_buf);
1253 if (sp)
1254 sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1255 else
1256 tc->prot_flags = DIF_BUNDL_DMA_VALID;
1258 list_for_each_entry_safe(dif_dsd, nxt_dsd,
1259 &difctx->ldif_dma_hndl_list, list) {
1260 u32 sglen = (difctx->dif_bundl_len >
1261 DIF_BUNDLING_DMA_POOL_SIZE) ?
1262 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1264 BUG_ON(track_difbundl_buf == 0);
1266 /* Allocate additional continuation packets? */
1267 if (avail_dsds == 0) {
1268 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1270 "%s: adding continuation iocb's\n",
1272 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1273 QLA_DSDS_PER_IOCB : used_dsds;
1274 dsd_list_len = (avail_dsds + 1) * 12;
1275 used_dsds -= avail_dsds;
1277 /* allocate tracking DS */
1278 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1279 if (!dsd_ptr) {
1280 ql_dbg(ql_dbg_tgt, vha, 0xe026,
1281 "%s: failed alloc dsd_ptr\n",
1282 __func__);
1283 return 1;
1284 }
1285 ha->dif_bundle_kallocs++;
1287 difctx->no_ldif_dsd++;
1288 /* allocate new list */
1289 dsd_ptr->dsd_addr =
1290 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1291 &dsd_ptr->dsd_list_dma);
1292 if (!dsd_ptr->dsd_addr) {
1293 ql_dbg(ql_dbg_tgt, vha, 0xe026,
1294 "%s: failed alloc ->dsd_addr\n",
1297 * need to cleanup only this dsd_ptr
1298 * rest will be done by sp_free_dma()
1301 ha->dif_bundle_kallocs--;
1304 ha->dif_bundle_dma_allocs++;
1306 if (sp) {
1307 list_add_tail(&dsd_ptr->list,
1308 &difctx->ldif_dsd_list);
1309 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1310 } else {
1311 list_add_tail(&dsd_ptr->list,
1312 &difctx->ldif_dsd_list);
1313 tc->ctx_dsd_alloced = 1;
1316 /* add new list to cmd iocb or last list */
1317 *cur_dsd++ =
1318 cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1319 *cur_dsd++ =
1320 cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1321 *cur_dsd++ = dsd_list_len;
1322 cur_dsd = dsd_ptr->dsd_addr;
1324 *cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma));
1325 *cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma));
1326 *cur_dsd++ = cpu_to_le32(sglen);
1327 avail_dsds--;
1328 difctx->dif_bundl_len -= sglen;
1329 track_difbundl_buf--;
1332 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1333 "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1334 difctx->no_ldif_dsd, difctx->no_dif_bundl);
1335 } else {
1336 for_each_sg(sgl, sg, tot_dsds, i) {
1339 /* Allocate additional continuation packets? */
1340 if (avail_dsds == 0) {
1341 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1342 QLA_DSDS_PER_IOCB : used_dsds;
1343 dsd_list_len = (avail_dsds + 1) * 12;
1344 used_dsds -= avail_dsds;
1346 /* allocate tracking DS */
1347 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1348 if (!dsd_ptr) {
1349 ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1351 "%s: failed alloc dsd_dma...\n",
1356 /* allocate new list */
1357 dsd_ptr->dsd_addr =
1358 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1359 &dsd_ptr->dsd_list_dma);
1360 if (!dsd_ptr->dsd_addr) {
1361 /* need to cleanup only this dsd_ptr */
1362 /* rest will be done by sp_free_dma() */
1363 kfree(dsd_ptr);
1364 return 1;
1365 }
1367 if (sp) {
1368 list_add_tail(&dsd_ptr->list,
1370 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1372 list_add_tail(&dsd_ptr->list,
1374 tc->ctx_dsd_alloced = 1;
1377 /* add new list to cmd iocb or last list */
1378 *cur_dsd++ =
1379 cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1380 *cur_dsd++ =
1381 cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1382 *cur_dsd++ = dsd_list_len;
1383 cur_dsd = dsd_ptr->dsd_addr;
1385 sle_dma = sg_dma_address(sg);
1386 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1387 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1388 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1392 /* Null termination */
1393 *cur_dsd++ = 0;
1394 *cur_dsd++ = 0;
1395 *cur_dsd++ = 0;
1399 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1400 * Type 6 IOCB types.
1402 * @sp: SRB command to process
1403 * @cmd_pkt: Command type CRC_2 IOCB
1404 * @tot_dsds: Total number of segments to transfer
1405 * @tot_prot_dsds: Total number of segments with protection information
1406 * @fw_prot_opts: Protection options to be passed to firmware
1409 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1410 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1412 uint32_t *cur_dsd, *fcp_dl;
1413 scsi_qla_host_t *vha;
1414 struct scsi_cmnd *cmd;
1415 uint32_t total_bytes = 0;
1416 uint32_t data_bytes;
1418 uint8_t bundling = 1;
1420 struct crc_context *crc_ctx_pkt = NULL;
1421 struct qla_hw_data *ha;
1422 uint8_t additional_fcpcdb_len;
1423 uint16_t fcp_cmnd_len;
1424 struct fcp_cmnd *fcp_cmnd;
1425 dma_addr_t crc_ctx_dma;
1427 cmd = GET_CMD_SP(sp);
1429 /* Update entry type to indicate Command Type CRC_2 IOCB */
1430 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
1432 vha = sp->vha;
1433 ha = vha->hw;
1435 /* No data transfer */
1436 data_bytes = scsi_bufflen(cmd);
1437 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1438 cmd_pkt->byte_count = cpu_to_le32(0);
1439 return QLA_SUCCESS;
1440 }
1442 cmd_pkt->vp_index = sp->vha->vp_idx;
1444 /* Set transfer direction */
1445 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1446 cmd_pkt->control_flags =
1447 cpu_to_le16(CF_WRITE_DATA);
1448 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1449 cmd_pkt->control_flags =
1450 cpu_to_le16(CF_READ_DATA);
1453 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1454 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1455 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1456 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1457 bundling = 0;
1459 /* Allocate CRC context from global pool */
1460 crc_ctx_pkt = sp->u.scmd.ctx =
1461 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1463 if (!crc_ctx_pkt)
1464 goto crc_queuing_error;
1466 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1468 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1471 crc_ctx_pkt->handle = cmd_pkt->handle;
1473 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1475 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1476 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1478 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1479 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1480 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1482 /* Determine SCSI command length -- align to 4 byte boundary */
1483 if (cmd->cmd_len > 16) {
1484 additional_fcpcdb_len = cmd->cmd_len - 16;
1485 if ((cmd->cmd_len % 4) != 0) {
1486 /* SCSI cmd > 16 bytes must be multiple of 4 */
1487 goto crc_queuing_error;
1488 }
1489 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1490 } else {
1491 additional_fcpcdb_len = 0;
1492 fcp_cmnd_len = 12 + 16 + 4;
1495 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1497 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1498 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1499 fcp_cmnd->additional_cdb_len |= 1;
1500 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1501 fcp_cmnd->additional_cdb_len |= 2;
1503 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1504 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1505 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1506 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1507 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1508 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1509 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1510 fcp_cmnd->task_management = 0;
1511 fcp_cmnd->task_attribute = TSK_SIMPLE;
1513 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1515 /* Compute dif len and adjust data len to include protection */
1516 dif_bytes = 0;
1517 blk_size = cmd->device->sector_size;
1518 dif_bytes = (data_bytes / blk_size) * 8;
1520 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1521 case SCSI_PROT_READ_INSERT:
1522 case SCSI_PROT_WRITE_STRIP:
1523 total_bytes = data_bytes;
1524 data_bytes += dif_bytes;
1525 break;
1527 case SCSI_PROT_READ_STRIP:
1528 case SCSI_PROT_WRITE_INSERT:
1529 case SCSI_PROT_READ_PASS:
1530 case SCSI_PROT_WRITE_PASS:
1531 total_bytes = data_bytes + dif_bytes;
1532 break;
1533 default:
1534 BUG();
1535 }
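/*
 * Worked example of the DIF accounting around this switch
 * (illustrative values): a 32768-byte transfer with 512-byte sectors
 * spans 64 blocks, each carrying an 8-byte DIF tuple, so dif_bytes =
 * (32768 / 512) * 8 = 512. READ_INSERT/WRITE_STRIP keep total_bytes
 * at 32768 (no DIF travels on the wire), while the other operations
 * move total_bytes = 32768 + 512 = 33280.
 */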
1537 if (!qla2x00_hba_err_chk_enabled(sp))
1538 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1539 /* HBA error checking enabled */
1540 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1541 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1542 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1543 SCSI_PROT_DIF_TYPE2))
1544 fw_prot_opts |= BIT_10;
1545 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1546 SCSI_PROT_DIF_TYPE3)
1547 fw_prot_opts |= BIT_11;
1550 if (!bundling) {
1551 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1552 } else {
1553 /*
1554 * Configure bundling if we need to fetch interleaving
1555 * protection PCI accesses
1556 */
1557 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1558 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1559 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1560 tot_prot_dsds);
1561 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1564 /* Finish the common fields of CRC pkt */
1565 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1566 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1567 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1568 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1569 /* Fibre channel byte count */
1570 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1571 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1572 additional_fcpcdb_len);
1573 *fcp_dl = htonl(total_bytes);
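/*
 * Byte-order note (sketch): fcp_dl sits inside the FCP_CMND payload,
 * which is big-endian on the wire, hence htonl() here rather than the
 * cpu_to_le32() used for IOCB fields. For example, total_bytes = 33280
 * (0x8200) is stored as the byte sequence 00 00 82 00.
 */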
1575 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1576 cmd_pkt->byte_count = cpu_to_le32(0);
1577 return QLA_SUCCESS;
1578 }
1579 /* Walks data segments */
1581 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1583 if (!bundling && tot_prot_dsds) {
1584 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1585 cur_dsd, tot_dsds, NULL))
1586 goto crc_queuing_error;
1587 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1588 (tot_dsds - tot_prot_dsds), NULL))
1589 goto crc_queuing_error;
1591 if (bundling && tot_prot_dsds) {
1592 /* Walks dif segments */
1593 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1594 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1595 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1596 tot_prot_dsds, NULL))
1597 goto crc_queuing_error;
1598 }
1599 return QLA_SUCCESS;
1601 crc_queuing_error:
1602 /* Cleanup will be performed by the caller */
1604 return QLA_FUNCTION_FAILED;
1608 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1609 * @sp: command to send to the ISP
1611 * Returns non-zero if a failure occurred, else zero.
1614 qla24xx_start_scsi(srb_t *sp)
1617 unsigned long flags;
1621 struct cmd_type_7 *cmd_pkt;
1625 struct req_que *req = NULL;
1626 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1627 struct scsi_qla_host *vha = sp->vha;
1628 struct qla_hw_data *ha = vha->hw;
1630 /* Setup device pointers. */
1633 /* So we know we haven't pci_map'ed anything yet */
1636 /* Send marker if required */
1637 if (vha->marker_needed != 0) {
1638 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1639 QLA_SUCCESS)
1640 return QLA_FUNCTION_FAILED;
1641 vha->marker_needed = 0;
1644 /* Acquire ring specific lock */
1645 spin_lock_irqsave(&ha->hardware_lock, flags);
1647 /* Check for room in outstanding command list. */
1648 handle = req->current_outstanding_cmd;
1649 for (index = 1; index < req->num_outstanding_cmds; index++) {
1650 handle++;
1651 if (handle == req->num_outstanding_cmds)
1652 handle = 1;
1653 if (!req->outstanding_cmds[handle])
1654 break;
1655 }
1656 if (index == req->num_outstanding_cmds)
1657 goto queuing_error;
1659 /* Map the sg table so we have an accurate count of sg entries needed */
1660 if (scsi_sg_count(cmd)) {
1661 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1662 scsi_sg_count(cmd), cmd->sc_data_direction);
1663 if (unlikely(!nseg))
1664 goto queuing_error;
1665 } else
1666 nseg = 0;
1668 tot_dsds = nseg;
1669 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1670 if (req->cnt < (req_cnt + 2)) {
1671 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1672 RD_REG_DWORD_RELAXED(req->req_q_out);
1673 if (req->ring_index < cnt)
1674 req->cnt = cnt - req->ring_index;
1675 else
1676 req->cnt = req->length -
1677 (req->ring_index - cnt);
1678 if (req->cnt < (req_cnt + 2))
1679 goto queuing_error;
1680 }
1682 /* Build command packet. */
1683 req->current_outstanding_cmd = handle;
1684 req->outstanding_cmds[handle] = sp;
1685 sp->handle = handle;
1686 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1687 req->cnt -= req_cnt;
1689 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1690 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1692 /* Zero out remaining portion of packet. */
1693 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1694 clr_ptr = (uint32_t *)cmd_pkt + 2;
1695 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1696 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1698 /* Set NPORT-ID and LUN number*/
1699 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1700 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1701 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1702 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1703 cmd_pkt->vp_index = sp->vha->vp_idx;
1705 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1706 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1708 cmd_pkt->task = TSK_SIMPLE;
1710 /* Load SCSI command packet. */
1711 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1712 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1714 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1716 /* Build IOCB segments */
1717 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1719 /* Set total data segment count. */
1720 cmd_pkt->entry_count = (uint8_t)req_cnt;
1721 wmb();
1722 /* Adjust ring index. */
1723 req->ring_index++;
1724 if (req->ring_index == req->length) {
1725 req->ring_index = 0;
1726 req->ring_ptr = req->ring;
1727 } else
1728 req->ring_ptr++;
1730 sp->flags |= SRB_DMA_VALID;
1732 /* Set chip new ring index. */
1733 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1735 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1736 return QLA_SUCCESS;
1738 queuing_error:
1739 if (tot_dsds)
1740 scsi_dma_unmap(cmd);
1742 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1744 return QLA_FUNCTION_FAILED;
1748 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1749 * @sp: command to send to the ISP
1751 * Returns non-zero if a failure occurred, else zero.
1754 qla24xx_dif_start_scsi(srb_t *sp)
1757 unsigned long flags;
1762 uint16_t req_cnt = 0;
1764 uint16_t tot_prot_dsds;
1765 uint16_t fw_prot_opts = 0;
1766 struct req_que *req = NULL;
1767 struct rsp_que *rsp = NULL;
1768 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1769 struct scsi_qla_host *vha = sp->vha;
1770 struct qla_hw_data *ha = vha->hw;
1771 struct cmd_type_crc_2 *cmd_pkt;
1772 uint32_t status = 0;
1774 #define QDSS_GOT_Q_SPACE BIT_0
1776 /* Only process protection or >16 cdb in this routine */
1777 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1778 if (cmd->cmd_len <= 16)
1779 return qla24xx_start_scsi(sp);
1782 /* Setup device pointers. */
1786 /* So we know we haven't pci_map'ed anything yet */
1789 /* Send marker if required */
1790 if (vha->marker_needed != 0) {
1791 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1792 QLA_SUCCESS)
1793 return QLA_FUNCTION_FAILED;
1794 vha->marker_needed = 0;
1797 /* Acquire ring specific lock */
1798 spin_lock_irqsave(&ha->hardware_lock, flags);
1800 /* Check for room in outstanding command list. */
1801 handle = req->current_outstanding_cmd;
1802 for (index = 1; index < req->num_outstanding_cmds; index++) {
1803 handle++;
1804 if (handle == req->num_outstanding_cmds)
1805 handle = 1;
1806 if (!req->outstanding_cmds[handle])
1807 break;
1808 }
1810 if (index == req->num_outstanding_cmds)
1811 goto queuing_error;
1813 /* Compute number of required data segments */
1814 /* Map the sg table so we have an accurate count of sg entries needed */
1815 if (scsi_sg_count(cmd)) {
1816 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1817 scsi_sg_count(cmd), cmd->sc_data_direction);
1818 if (unlikely(!nseg))
1819 goto queuing_error;
1820 else
1821 sp->flags |= SRB_DMA_VALID;
1823 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1824 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1825 struct qla2_sgx sgx;
1828 memset(&sgx, 0, sizeof(struct qla2_sgx));
1829 sgx.tot_bytes = scsi_bufflen(cmd);
1830 sgx.cur_sg = scsi_sglist(cmd);
1831 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1833 nseg = 0;
1834 while (qla24xx_get_one_block_sg(
1835 cmd->device->sector_size, &sgx, &partial))
1836 nseg++;
1837 }
1841 /* number of required data segments */
1842 tot_dsds = nseg;
1844 /* Compute number of required protection segments */
1845 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1846 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1847 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1848 if (unlikely(!nseg))
1849 goto queuing_error;
1850 else
1851 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1853 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1854 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1855 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1856 }
1857 } else {
1858 nseg = 0;
1859 }
1861 req_cnt = 1;
1862 /* Total Data and protection sg segment(s) */
1863 tot_prot_dsds = nseg;
1864 tot_dsds += nseg;
1865 if (req->cnt < (req_cnt + 2)) {
1866 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1867 RD_REG_DWORD_RELAXED(req->req_q_out);
1868 if (req->ring_index < cnt)
1869 req->cnt = cnt - req->ring_index;
1870 else
1871 req->cnt = req->length -
1872 (req->ring_index - cnt);
1873 if (req->cnt < (req_cnt + 2))
1874 goto queuing_error;
1875 }
1877 status |= QDSS_GOT_Q_SPACE;
1879 /* Build header part of command packet (excluding the OPCODE). */
1880 req->current_outstanding_cmd = handle;
1881 req->outstanding_cmds[handle] = sp;
1882 sp->handle = handle;
1883 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1884 req->cnt -= req_cnt;
1886 /* Fill-in common area */
1887 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1888 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1890 clr_ptr = (uint32_t *)cmd_pkt + 2;
1891 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1893 /* Set NPORT-ID and LUN number*/
1894 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1895 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1896 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1897 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1899 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1900 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1902 /* Total Data and protection segment(s) */
1903 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1905 /* Build IOCB segments and adjust for data protection segments */
1906 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1907 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1908 QLA_SUCCESS)
1909 goto queuing_error;
1911 cmd_pkt->entry_count = (uint8_t)req_cnt;
1912 /* Specify response queue number where completion should happen */
1913 cmd_pkt->entry_status = (uint8_t) rsp->id;
1914 cmd_pkt->timeout = cpu_to_le16(0);
1915 wmb();
1917 /* Adjust ring index. */
1918 req->ring_index++;
1919 if (req->ring_index == req->length) {
1920 req->ring_index = 0;
1921 req->ring_ptr = req->ring;
1922 } else
1923 req->ring_ptr++;
1925 /* Set chip new ring index. */
1926 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1928 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1930 return QLA_SUCCESS;
1932 queuing_error:
1933 if (status & QDSS_GOT_Q_SPACE) {
1934 req->outstanding_cmds[handle] = NULL;
1935 req->cnt += req_cnt;
1937 /* Cleanup will be performed by the caller (queuecommand) */
1939 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1940 return QLA_FUNCTION_FAILED;
1944 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1945 * @sp: command to send to the ISP
1947 * Returns non-zero if a failure occurred, else zero.
1950 qla2xxx_start_scsi_mq(srb_t *sp)
1953 unsigned long flags;
1957 struct cmd_type_7 *cmd_pkt;
1961 struct req_que *req = NULL;
1962 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1963 struct scsi_qla_host *vha = sp->fcport->vha;
1964 struct qla_hw_data *ha = vha->hw;
1965 struct qla_qpair *qpair = sp->qpair;
1967 /* Acquire qpair specific lock */
1968 spin_lock_irqsave(&qpair->qp_lock, flags);
1970 /* Setup qpair pointers */
1973 /* So we know we haven't pci_map'ed anything yet */
1976 /* Send marker if required */
1977 if (vha->marker_needed != 0) {
1978 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1979 QLA_SUCCESS) {
1980 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1981 return QLA_FUNCTION_FAILED;
1983 vha->marker_needed = 0;
1986 /* Check for room in outstanding command list. */
1987 handle = req->current_outstanding_cmd;
1988 for (index = 1; index < req->num_outstanding_cmds; index++) {
1989 handle++;
1990 if (handle == req->num_outstanding_cmds)
1991 handle = 1;
1992 if (!req->outstanding_cmds[handle])
1993 break;
1994 }
1995 if (index == req->num_outstanding_cmds)
1996 goto queuing_error;
1998 /* Map the sg table so we have an accurate count of sg entries needed */
1999 if (scsi_sg_count(cmd)) {
2000 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2001 scsi_sg_count(cmd), cmd->sc_data_direction);
2002 if (unlikely(!nseg))
2003 goto queuing_error;
2004 } else
2005 nseg = 0;
2007 tot_dsds = nseg;
2008 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2009 if (req->cnt < (req_cnt + 2)) {
2010 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2011 RD_REG_DWORD_RELAXED(req->req_q_out);
2012 if (req->ring_index < cnt)
2013 req->cnt = cnt - req->ring_index;
2014 else
2015 req->cnt = req->length -
2016 (req->ring_index - cnt);
2017 if (req->cnt < (req_cnt + 2))
2018 goto queuing_error;
2019 }
2021 /* Build command packet. */
2022 req->current_outstanding_cmd = handle;
2023 req->outstanding_cmds[handle] = sp;
2024 sp->handle = handle;
2025 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2026 req->cnt -= req_cnt;
2028 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2029 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2031 /* Zero out remaining portion of packet. */
2032 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2033 clr_ptr = (uint32_t *)cmd_pkt + 2;
2034 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2035 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2037 /* Set NPORT-ID and LUN number*/
2038 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2039 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2040 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2041 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2042 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2044 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2045 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2047 cmd_pkt->task = TSK_SIMPLE;
2049 /* Load SCSI command packet. */
2050 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2051 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2053 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2055 /* Build IOCB segments */
2056 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2058 /* Set total data segment count. */
2059 cmd_pkt->entry_count = (uint8_t)req_cnt;
2060 wmb();
2061 /* Adjust ring index. */
2062 req->ring_index++;
2063 if (req->ring_index == req->length) {
2064 req->ring_index = 0;
2065 req->ring_ptr = req->ring;
2066 } else
2067 req->ring_ptr++;
2069 sp->flags |= SRB_DMA_VALID;
2071 /* Set chip new ring index. */
2072 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2074 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2075 return QLA_SUCCESS;
2077 queuing_error:
2078 if (tot_dsds)
2079 scsi_dma_unmap(cmd);
2081 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2083 return QLA_FUNCTION_FAILED;
2088 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2089 * @sp: command to send to the ISP
2091 * Returns non-zero if a failure occurred, else zero.
2094 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2097 unsigned long flags;
2102 uint16_t req_cnt = 0;
2104 uint16_t tot_prot_dsds;
2105 uint16_t fw_prot_opts = 0;
2106 struct req_que *req = NULL;
2107 struct rsp_que *rsp = NULL;
2108 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2109 struct scsi_qla_host *vha = sp->fcport->vha;
2110 struct qla_hw_data *ha = vha->hw;
2111 struct cmd_type_crc_2 *cmd_pkt;
2112 uint32_t status = 0;
2113 struct qla_qpair *qpair = sp->qpair;
2115 #define QDSS_GOT_Q_SPACE BIT_0
2117 /* Check for host side state */
2118 if (!qpair->online) {
2119 cmd->result = DID_NO_CONNECT << 16;
2120 return QLA_INTERFACE_ERROR;
2123 if (!qpair->difdix_supported &&
2124 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2125 cmd->result = DID_NO_CONNECT << 16;
2126 return QLA_INTERFACE_ERROR;
2129 /* Only process protection or >16 cdb in this routine */
2130 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2131 if (cmd->cmd_len <= 16)
2132 return qla2xxx_start_scsi_mq(sp);
2135 spin_lock_irqsave(&qpair->qp_lock, flags);
2137 /* Setup qpair pointers */
2141 /* So we know we haven't pci_map'ed anything yet */
2144 /* Send marker if required */
2145 if (vha->marker_needed != 0) {
2146 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2147 QLA_SUCCESS) {
2148 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2149 return QLA_FUNCTION_FAILED;
2151 vha->marker_needed = 0;
2154 /* Check for room in outstanding command list. */
2155 handle = req->current_outstanding_cmd;
2156 for (index = 1; index < req->num_outstanding_cmds; index++) {
2157 handle++;
2158 if (handle == req->num_outstanding_cmds)
2159 handle = 1;
2160 if (!req->outstanding_cmds[handle])
2161 break;
2162 }
2164 if (index == req->num_outstanding_cmds)
2165 goto queuing_error;
2167 /* Compute number of required data segments */
2168 /* Map the sg table so we have an accurate count of sg entries needed */
2169 if (scsi_sg_count(cmd)) {
2170 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2171 scsi_sg_count(cmd), cmd->sc_data_direction);
2172 if (unlikely(!nseg))
2173 goto queuing_error;
2174 else
2175 sp->flags |= SRB_DMA_VALID;
2177 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2178 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2179 struct qla2_sgx sgx;
2182 memset(&sgx, 0, sizeof(struct qla2_sgx));
2183 sgx.tot_bytes = scsi_bufflen(cmd);
2184 sgx.cur_sg = scsi_sglist(cmd);
2185 sp->flags |= SRB_CRC_CTX_DSD_VALID;
2187 nseg = 0;
2188 while (qla24xx_get_one_block_sg(
2189 cmd->device->sector_size, &sgx, &partial))
2190 nseg++;
2191 }
2195 /* number of required data segments */
2196 tot_dsds = nseg;
2198 /* Compute number of required protection segments */
2199 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2200 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2201 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2202 if (unlikely(!nseg))
2203 goto queuing_error;
2204 else
2205 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2207 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2208 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2209 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2210 }
2211 } else {
2212 nseg = 0;
2213 }
2215 req_cnt = 1;
2216 /* Total Data and protection sg segment(s) */
2217 tot_prot_dsds = nseg;
2218 tot_dsds += nseg;
2219 if (req->cnt < (req_cnt + 2)) {
2220 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2221 RD_REG_DWORD_RELAXED(req->req_q_out);
2222 if (req->ring_index < cnt)
2223 req->cnt = cnt - req->ring_index;
2224 else
2225 req->cnt = req->length -
2226 (req->ring_index - cnt);
2227 if (req->cnt < (req_cnt + 2))
2228 goto queuing_error;
2229 }
2231 status |= QDSS_GOT_Q_SPACE;
2233 /* Build header part of command packet (excluding the OPCODE). */
2234 req->current_outstanding_cmd = handle;
2235 req->outstanding_cmds[handle] = sp;
2236 sp->handle = handle;
2237 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2238 req->cnt -= req_cnt;
2240 /* Fill-in common area */
2241 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2242 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2244 clr_ptr = (uint32_t *)cmd_pkt + 2;
2245 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2247 /* Set NPORT-ID and LUN number*/
2248 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2249 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2250 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2251 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2253 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2254 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2256 /* Total Data and protection segment(s) */
2257 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2259 /* Build IOCB segments and adjust for data protection segments */
2260 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2261 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2262 QLA_SUCCESS)
2263 goto queuing_error;
2265 cmd_pkt->entry_count = (uint8_t)req_cnt;
2266 cmd_pkt->timeout = cpu_to_le16(0);
2267 wmb();
2269 /* Adjust ring index. */
2270 req->ring_index++;
2271 if (req->ring_index == req->length) {
2272 req->ring_index = 0;
2273 req->ring_ptr = req->ring;
2274 } else
2275 req->ring_ptr++;
2277 /* Set chip new ring index. */
2278 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2280 /* Manage unprocessed RIO/ZIO commands in response queue. */
2281 if (vha->flags.process_response_queue &&
2282 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2283 qla24xx_process_response_queue(vha, rsp);
2285 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2287 return QLA_SUCCESS;
2289 queuing_error:
2290 if (status & QDSS_GOT_Q_SPACE) {
2291 req->outstanding_cmds[handle] = NULL;
2292 req->cnt += req_cnt;
2294 /* Cleanup will be performed by the caller (queuecommand) */
2296 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2297 return QLA_FUNCTION_FAILED;
2300 /* Generic Control-SRB manipulation functions. */
2302 /* hardware_lock assumed to be held. */
2305 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2307 scsi_qla_host_t *vha = qpair->vha;
2308 struct qla_hw_data *ha = vha->hw;
2309 struct req_que *req = qpair->req;
2310 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2311 uint32_t index, handle;
2313 uint16_t cnt, req_cnt;
2319 if (sp && (sp->type != SRB_SCSI_CMD)) {
2320 /* Adjust entry-counts as needed. */
2321 req_cnt = sp->iocbs;
2324 /* Check for room on request queue. */
2325 if (req->cnt < req_cnt + 2) {
2326 if (qpair->use_shadow_reg)
2327 cnt = *req->out_ptr;
2328 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2329 IS_QLA28XX(ha))
2330 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2331 else if (IS_P3P_TYPE(ha))
2332 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2333 else if (IS_FWI2_CAPABLE(ha))
2334 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2335 else if (IS_QLAFX00(ha))
2336 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2337 else
2338 cnt = qla2x00_debounce_register(
2339 ISP_REQ_Q_OUT(ha, &reg->isp));
2341 if (req->ring_index < cnt)
2342 req->cnt = cnt - req->ring_index;
2343 else
2344 req->cnt = req->length -
2345 (req->ring_index - cnt);
2347 if (req->cnt < req_cnt + 2)
2348 goto queuing_error;
2349 }
2351 /* Check for room in outstanding command list. */
2352 handle = req->current_outstanding_cmd;
2353 for (index = 1; index < req->num_outstanding_cmds; index++) {
2354 handle++;
2355 if (handle == req->num_outstanding_cmds)
2356 handle = 1;
2357 if (!req->outstanding_cmds[handle])
2358 break;
2359 }
2360 if (index == req->num_outstanding_cmds) {
2361 ql_log(ql_log_warn, vha, 0x700b,
2362 "No room on outstanding cmd array.\n");
2363 goto queuing_error;
2364 }
2366 /* Prep command array. */
2367 req->current_outstanding_cmd = handle;
2368 req->outstanding_cmds[handle] = sp;
2369 sp->handle = handle;
2373 req->cnt -= req_cnt;
2374 pkt = req->ring_ptr;
2375 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2376 if (IS_QLAFX00(ha)) {
2377 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2378 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2379 } else {
2380 pkt->entry_count = req_cnt;
2381 pkt->handle = handle;
2382 }
2384 return pkt;
2386 queuing_error:
2387 qpair->tgt_counters.num_alloc_iocb_failed++;
2388 return pkt;
2389 }
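/*
 * Illustrative sketch: the free-entry computation used by
 * __qla2x00_alloc_iocbs() above. 'in' is the driver's producer index
 * (req->ring_index), 'out' the consumer index read back from the chip;
 * the free span wraps around the end of the ring.
 */
static inline uint16_t
ring_free_entries(uint16_t in, uint16_t out, uint16_t length)
{
	return (in < out) ? out - in : length - (in - out);
}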
2391 void *
2392 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2393 {
2394 scsi_qla_host_t *vha = qpair->vha;
2396 if (qla2x00_reset_active(vha))
2397 return NULL;
2399 return __qla2x00_alloc_iocbs(qpair, sp);
2400 }
2402 void *
2403 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2404 {
2405 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2406 }
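/*
 * Illustrative sketch: the round-robin free-slot search used by the
 * IOCB allocators in this file. The scan starts just after the last
 * handle issued and wraps, treating slot 0 as reserved; 0 is returned
 * when the table is full.
 */
static uint32_t
find_free_handle(srb_t **cmds, uint32_t ncmds, uint32_t last)
{
	uint32_t index, handle = last;

	for (index = 1; index < ncmds; index++) {
		handle++;
		if (handle == ncmds)
			handle = 1;	/* wrap past the reserved slot 0 */
		if (!cmds[handle])
			return handle;
	}
	return 0;	/* no free slot */
}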
2408 static void
2409 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2410 {
2411 struct srb_iocb *lio = &sp->u.iocb_cmd;
2413 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2414 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2415 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2416 logio->control_flags |= LCF_NVME_PRLI;
2417 if (sp->vha->flags.nvme_first_burst)
2418 logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2419 }
2421 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2422 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2423 logio->port_id[1] = sp->fcport->d_id.b.area;
2424 logio->port_id[2] = sp->fcport->d_id.b.domain;
2425 logio->vp_index = sp->vha->vp_idx;
2426 }
2428 static void
2429 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2430 {
2431 struct srb_iocb *lio = &sp->u.iocb_cmd;
2433 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2434 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2435 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2436 } else {
2437 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2438 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2439 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2440 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2441 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2442 }
2443 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2444 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2445 logio->port_id[1] = sp->fcport->d_id.b.area;
2446 logio->port_id[2] = sp->fcport->d_id.b.domain;
2447 logio->vp_index = sp->vha->vp_idx;
2448 }
2450 static void
2451 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2452 {
2453 struct qla_hw_data *ha = sp->vha->hw;
2454 struct srb_iocb *lio = &sp->u.iocb_cmd;
2455 uint16_t opts;
2457 mbx->entry_type = MBX_IOCB_TYPE;
2458 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2459 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2460 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2461 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2462 if (HAS_EXTENDED_IDS(ha)) {
2463 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2464 mbx->mb10 = cpu_to_le16(opts);
2465 } else {
2466 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2467 }
2468 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2469 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2470 sp->fcport->d_id.b.al_pa);
2471 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2472 }
2474 static void
2475 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2476 {
2477 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2478 logio->control_flags =
2479 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2480 if (!sp->fcport->keep_nport_handle)
2481 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2482 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2483 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2484 logio->port_id[1] = sp->fcport->d_id.b.area;
2485 logio->port_id[2] = sp->fcport->d_id.b.domain;
2486 logio->vp_index = sp->vha->vp_idx;
2487 }
2489 static void
2490 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2491 {
2492 struct qla_hw_data *ha = sp->vha->hw;
2494 mbx->entry_type = MBX_IOCB_TYPE;
2495 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2496 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2497 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2498 cpu_to_le16(sp->fcport->loop_id):
2499 cpu_to_le16(sp->fcport->loop_id << 8);
2500 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2501 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2502 sp->fcport->d_id.b.al_pa);
2503 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2504 /* Implicit: mbx->mbx10 = 0. */
2505 }
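/*
 * Illustrative sketch: the mailbox-1 encoding used by the mailbox-based
 * login/logout/ADISC IOCBs above. With extended IDs the loop ID fills
 * the whole register (options move to mb10); otherwise the loop ID is
 * packed into the upper byte alongside the option bits.
 */
static inline uint16_t
mb1_loop_id(int has_extended_ids, uint16_t loop_id, uint16_t opts)
{
	return has_extended_ids ? loop_id : (loop_id << 8) | opts;
}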
2507 static void
2508 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2509 {
2510 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2511 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2512 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2513 logio->vp_index = sp->vha->vp_idx;
2514 }
2516 static void
2517 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2518 {
2519 struct qla_hw_data *ha = sp->vha->hw;
2521 mbx->entry_type = MBX_IOCB_TYPE;
2522 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2523 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2524 if (HAS_EXTENDED_IDS(ha)) {
2525 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2526 mbx->mb10 = cpu_to_le16(BIT_0);
2528 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2529 }
2530 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2531 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2532 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2533 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2534 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2535 }
2537 static void
2538 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2539 {
2540 uint32_t flags;
2541 uint64_t lun;
2542 struct fc_port *fcport = sp->fcport;
2543 scsi_qla_host_t *vha = fcport->vha;
2544 struct qla_hw_data *ha = vha->hw;
2545 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2546 struct req_que *req = vha->req;
2548 flags = iocb->u.tmf.flags;
2549 lun = iocb->u.tmf.lun;
2551 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2552 tsk->entry_count = 1;
2553 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2554 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2555 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2556 tsk->control_flags = cpu_to_le32(flags);
2557 tsk->port_id[0] = fcport->d_id.b.al_pa;
2558 tsk->port_id[1] = fcport->d_id.b.area;
2559 tsk->port_id[2] = fcport->d_id.b.domain;
2560 tsk->vp_index = fcport->vha->vp_idx;
2562 if (flags == TCF_LUN_RESET) {
2563 int_to_scsilun(lun, &tsk->lun);
2564 host_to_fcp_swap((uint8_t *)&tsk->lun,
2565 sizeof(tsk->lun));
2566 }
2567 }
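/*
 * Illustrative sketch: the task-management timeout above is two R_A_TOV
 * intervals. Assuming ha->r_a_tov is held in 100 ms units (implied by
 * the division by 10), the firmware field is expressed in seconds.
 */
static inline uint16_t
tm_timeout_secs(uint16_t r_a_tov_100ms)
{
	return r_a_tov_100ms / 10 * 2;	/* 2 * R_A_TOV, in seconds */
}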
2569 static void
2570 qla2x00_els_dcmd_sp_free(void *data)
2571 {
2572 srb_t *sp = data;
2573 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2575 kfree(sp->fcport);
2577 if (elsio->u.els_logo.els_logo_pyld)
2578 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2579 elsio->u.els_logo.els_logo_pyld,
2580 elsio->u.els_logo.els_logo_pyld_dma);
2582 del_timer(&elsio->timer);
2583 qla2x00_rel_sp(sp);
2584 }
2586 static void
2587 qla2x00_els_dcmd_iocb_timeout(void *data)
2588 {
2589 srb_t *sp = data;
2590 fc_port_t *fcport = sp->fcport;
2591 struct scsi_qla_host *vha = sp->vha;
2592 struct srb_iocb *lio = &sp->u.iocb_cmd;
2594 ql_dbg(ql_dbg_io, vha, 0x3069,
2595 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2596 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2597 fcport->d_id.b.al_pa);
2599 complete(&lio->u.els_logo.comp);
2600 }
2602 static void
2603 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2604 {
2605 srb_t *sp = ptr;
2606 fc_port_t *fcport = sp->fcport;
2607 struct srb_iocb *lio = &sp->u.iocb_cmd;
2608 struct scsi_qla_host *vha = sp->vha;
2610 ql_dbg(ql_dbg_io, vha, 0x3072,
2611 "%s hdl=%x, portid=%02x%02x%02x done\n",
2612 sp->name, sp->handle, fcport->d_id.b.domain,
2613 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2615 complete(&lio->u.els_logo.comp);
2616 }
2618 int
2619 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2620 port_id_t remote_did)
2621 {
2622 srb_t *sp;
2623 fc_port_t *fcport = NULL;
2624 struct srb_iocb *elsio = NULL;
2625 struct qla_hw_data *ha = vha->hw;
2626 struct els_logo_payload logo_pyld;
2627 int rval = QLA_SUCCESS;
2629 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2630 if (!fcport) {
2631 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2632 return -ENOMEM;
2633 }
2635 /* Alloc SRB structure */
2636 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2637 if (!sp) {
2638 kfree(fcport);
2639 ql_log(ql_log_info, vha, 0x70e6,
2640 "SRB allocation failed\n");
2641 return -ENOMEM;
2642 }
2644 elsio = &sp->u.iocb_cmd;
2645 fcport->loop_id = 0xFFFF;
2646 fcport->d_id.b.domain = remote_did.b.domain;
2647 fcport->d_id.b.area = remote_did.b.area;
2648 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2650 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2651 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2653 sp->type = SRB_ELS_DCMD;
2654 sp->name = "ELS_DCMD";
2655 sp->fcport = fcport;
2656 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2657 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2658 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2659 sp->done = qla2x00_els_dcmd_sp_done;
2660 sp->free = qla2x00_els_dcmd_sp_free;
2662 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2663 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2664 GFP_KERNEL);
2666 if (!elsio->u.els_logo.els_logo_pyld) {
2667 sp->free(sp);
2668 return QLA_FUNCTION_FAILED;
2669 }
2671 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2673 elsio->u.els_logo.els_cmd = els_opcode;
2674 logo_pyld.opcode = els_opcode;
2675 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2676 logo_pyld.s_id[1] = vha->d_id.b.area;
2677 logo_pyld.s_id[2] = vha->d_id.b.domain;
2678 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2679 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2681 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2682 sizeof(struct els_logo_payload));
2684 rval = qla2x00_start_sp(sp);
2685 if (rval != QLA_SUCCESS) {
2686 sp->free(sp);
2687 return QLA_FUNCTION_FAILED;
2688 }
2690 ql_dbg(ql_dbg_io, vha, 0x3074,
2691 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2692 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2693 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2695 wait_for_completion(&elsio->u.els_logo.comp);
2697 sp->free(sp);
2699 return rval;
2700 }
2701 static void
2702 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2703 {
2704 scsi_qla_host_t *vha = sp->vha;
2705 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2707 els_iocb->entry_type = ELS_IOCB_TYPE;
2708 els_iocb->entry_count = 1;
2709 els_iocb->sys_define = 0;
2710 els_iocb->entry_status = 0;
2711 els_iocb->handle = sp->handle;
2712 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2713 els_iocb->tx_dsd_count = 1;
2714 els_iocb->vp_index = vha->vp_idx;
2715 els_iocb->sof_type = EST_SOFI3;
2716 els_iocb->rx_dsd_count = 0;
2717 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2719 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2720 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2721 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2722 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2723 els_iocb->s_id[1] = vha->d_id.b.area;
2724 els_iocb->s_id[2] = vha->d_id.b.domain;
2725 els_iocb->control_flags = 0;
2727 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2728 els_iocb->tx_byte_count = els_iocb->tx_len =
2729 sizeof(struct els_plogi_payload);
2730 els_iocb->tx_address[0] =
2731 cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2732 els_iocb->tx_address[1] =
2733 cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2735 els_iocb->rx_dsd_count = 1;
2736 els_iocb->rx_byte_count = els_iocb->rx_len =
2737 sizeof(struct els_plogi_payload);
2738 els_iocb->rx_address[0] =
2739 cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2740 els_iocb->rx_address[1] =
2741 cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2743 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2744 "PLOGI ELS IOCB:\n");
2745 ql_dump_buffer(ql_log_info, vha, 0x0109,
2746 (uint8_t *)els_iocb, 0x70);
2747 } else {
2748 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2749 els_iocb->tx_address[0] =
2750 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2751 els_iocb->tx_address[1] =
2752 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2753 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2755 els_iocb->rx_byte_count = 0;
2756 els_iocb->rx_address[0] = 0;
2757 els_iocb->rx_address[1] = 0;
2758 els_iocb->rx_len = 0;
2759 }
2761 sp->vha->qla_stats.control_requests++;
2762 }
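/*
 * Illustrative sketch: data-segment addresses in these IOCBs are
 * carried as two little-endian 32-bit words; LSD()/MSD() select the low
 * and high halves of the 64-bit DMA address, as in the assignments
 * above.
 */
static inline void
dsd_put_addr(__le32 *dsd, dma_addr_t dma)
{
	dsd[0] = cpu_to_le32(LSD(dma));	/* low 32 bits */
	dsd[1] = cpu_to_le32(MSD(dma));	/* high 32 bits */
}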
2764 static void
2765 qla2x00_els_dcmd2_iocb_timeout(void *data)
2766 {
2767 srb_t *sp = data;
2768 fc_port_t *fcport = sp->fcport;
2769 struct scsi_qla_host *vha = sp->vha;
2770 struct qla_hw_data *ha = vha->hw;
2771 unsigned long flags = 0;
2772 int res;
2774 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2775 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2776 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2778 /* Abort the exchange */
2779 spin_lock_irqsave(&ha->hardware_lock, flags);
2780 res = ha->isp_ops->abort_command(sp);
2781 ql_dbg(ql_dbg_io, vha, 0x3070,
2782 "mbx abort_command %s\n",
2783 (res == QLA_SUCCESS) ? "successful" : "failed");
2784 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2786 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2787 }
2789 static void
2790 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2791 {
2792 srb_t *sp = ptr;
2793 fc_port_t *fcport = sp->fcport;
2794 struct srb_iocb *lio = &sp->u.iocb_cmd;
2795 struct scsi_qla_host *vha = sp->vha;
2796 struct event_arg ea;
2797 struct qla_work_evt *e;
2799 ql_dbg(ql_dbg_disc, vha, 0x3072,
2800 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2801 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2803 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2804 del_timer(&sp->u.iocb_cmd.timer);
2806 if (sp->flags & SRB_WAKEUP_ON_COMP)
2807 complete(&lio->u.els_plogi.comp);
2808 else {
2809 if (res) {
2810 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2811 } else {
2812 memset(&ea, 0, sizeof(ea));
2813 ea.fcport = fcport;
2814 ea.rc = res;
2815 ea.event = FCME_ELS_PLOGI_DONE;
2816 qla2x00_fcport_event_handler(vha, &ea);
2817 }
2819 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2820 if (!e) {
2821 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2823 if (elsio->u.els_plogi.els_plogi_pyld)
2824 dma_free_coherent(&sp->vha->hw->pdev->dev,
2825 elsio->u.els_plogi.tx_size,
2826 elsio->u.els_plogi.els_plogi_pyld,
2827 elsio->u.els_plogi.els_plogi_pyld_dma);
2829 if (elsio->u.els_plogi.els_resp_pyld)
2830 dma_free_coherent(&sp->vha->hw->pdev->dev,
2831 elsio->u.els_plogi.rx_size,
2832 elsio->u.els_plogi.els_resp_pyld,
2833 elsio->u.els_plogi.els_resp_pyld_dma);
2834 sp->free(sp);
2835 return;
2836 }
2837 e->u.iosb.sp = sp;
2838 qla2x00_post_work(vha, e);
2839 }
2840 }
2842 int
2843 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2844 fc_port_t *fcport, bool wait)
2845 {
2846 srb_t *sp;
2847 struct srb_iocb *elsio = NULL;
2848 struct qla_hw_data *ha = vha->hw;
2849 int rval = QLA_SUCCESS;
2850 void *ptr, *resp_ptr;
2852 /* Alloc SRB structure */
2853 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2854 if (!sp) {
2855 ql_log(ql_log_info, vha, 0x70e6,
2856 "SRB allocation failed\n");
2857 return -ENOMEM;
2858 }
2860 elsio = &sp->u.iocb_cmd;
2861 ql_dbg(ql_dbg_io, vha, 0x3073,
2862 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2864 fcport->flags |= FCF_ASYNC_SENT;
2865 sp->type = SRB_ELS_DCMD;
2866 sp->name = "ELS_DCMD";
2867 sp->fcport = fcport;
2869 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2870 init_completion(&elsio->u.els_plogi.comp);
2871 if (wait)
2872 sp->flags = SRB_WAKEUP_ON_COMP;
2874 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2876 sp->done = qla2x00_els_dcmd2_sp_done;
2877 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2879 ptr = elsio->u.els_plogi.els_plogi_pyld =
2880 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2881 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2883 if (!elsio->u.els_plogi.els_plogi_pyld) {
2884 rval = QLA_FUNCTION_FAILED;
2885 goto out;
2886 }
2888 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2889 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2890 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2892 if (!elsio->u.els_plogi.els_resp_pyld) {
2893 rval = QLA_FUNCTION_FAILED;
2894 goto out;
2895 }
2897 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2899 memset(ptr, 0, sizeof(struct els_plogi_payload));
2900 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2901 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2902 &ha->plogi_els_payld.data,
2903 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2905 elsio->u.els_plogi.els_cmd = els_opcode;
2906 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2908 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2909 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2910 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2912 rval = qla2x00_start_sp(sp);
2913 if (rval != QLA_SUCCESS) {
2914 rval = QLA_FUNCTION_FAILED;
2915 } else {
2916 ql_dbg(ql_dbg_disc, vha, 0x3074,
2917 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2918 sp->name, sp->handle, fcport->loop_id,
2919 fcport->d_id.b24, vha->d_id.b24);
2920 }
2922 if (wait) {
2923 wait_for_completion(&elsio->u.els_plogi.comp);
2925 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2926 rval = QLA_FUNCTION_FAILED;
2927 } else {
2928 goto done;
2929 }
2931 out:
2932 fcport->flags &= ~(FCF_ASYNC_SENT);
2933 if (elsio->u.els_plogi.els_plogi_pyld)
2934 dma_free_coherent(&sp->vha->hw->pdev->dev,
2935 elsio->u.els_plogi.tx_size,
2936 elsio->u.els_plogi.els_plogi_pyld,
2937 elsio->u.els_plogi.els_plogi_pyld_dma);
2939 if (elsio->u.els_plogi.els_resp_pyld)
2940 dma_free_coherent(&sp->vha->hw->pdev->dev,
2941 elsio->u.els_plogi.rx_size,
2942 elsio->u.els_plogi.els_resp_pyld,
2943 elsio->u.els_plogi.els_resp_pyld_dma);
2945 sp->free(sp);
2946 done:
2947 return rval;
2948 }
2950 static void
2951 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2952 {
2953 struct bsg_job *bsg_job = sp->u.bsg_job;
2954 struct fc_bsg_request *bsg_request = bsg_job->request;
2956 els_iocb->entry_type = ELS_IOCB_TYPE;
2957 els_iocb->entry_count = 1;
2958 els_iocb->sys_define = 0;
2959 els_iocb->entry_status = 0;
2960 els_iocb->handle = sp->handle;
2961 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2962 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2963 els_iocb->vp_index = sp->vha->vp_idx;
2964 els_iocb->sof_type = EST_SOFI3;
2965 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2967 els_iocb->opcode =
2968 sp->type == SRB_ELS_CMD_RPT ?
2969 bsg_request->rqst_data.r_els.els_code :
2970 bsg_request->rqst_data.h_els.command_code;
2971 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2972 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2973 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2974 els_iocb->control_flags = 0;
2975 els_iocb->rx_byte_count =
2976 cpu_to_le32(bsg_job->reply_payload.payload_len);
2977 els_iocb->tx_byte_count =
2978 cpu_to_le32(bsg_job->request_payload.payload_len);
2980 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2981 (bsg_job->request_payload.sg_list)));
2982 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2983 (bsg_job->request_payload.sg_list)));
2984 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2985 (bsg_job->request_payload.sg_list));
2987 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2988 (bsg_job->reply_payload.sg_list)));
2989 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2990 (bsg_job->reply_payload.sg_list)));
2991 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2992 (bsg_job->reply_payload.sg_list));
2994 sp->vha->qla_stats.control_requests++;
2995 }
2997 static void
2998 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2999 {
3000 uint16_t avail_dsds;
3001 uint32_t *cur_dsd;
3002 struct scatterlist *sg;
3003 int index;
3004 uint16_t tot_dsds;
3005 scsi_qla_host_t *vha = sp->vha;
3006 struct qla_hw_data *ha = vha->hw;
3007 struct bsg_job *bsg_job = sp->u.bsg_job;
3008 int loop_iteration = 0;
3009 int entry_count = 1;
3011 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3012 ct_iocb->entry_type = CT_IOCB_TYPE;
3013 ct_iocb->entry_status = 0;
3014 ct_iocb->handle1 = sp->handle;
3015 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3016 ct_iocb->status = cpu_to_le16(0);
3017 ct_iocb->control_flags = cpu_to_le16(0);
3018 ct_iocb->timeout = 0;
3019 ct_iocb->cmd_dsd_count =
3020 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3021 ct_iocb->total_dsd_count =
3022 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3023 ct_iocb->req_bytecount =
3024 cpu_to_le32(bsg_job->request_payload.payload_len);
3025 ct_iocb->rsp_bytecount =
3026 cpu_to_le32(bsg_job->reply_payload.payload_len);
3028 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
3029 (bsg_job->request_payload.sg_list)));
3030 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
3031 (bsg_job->request_payload.sg_list)));
3032 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
3034 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
3035 (bsg_job->reply_payload.sg_list)));
3036 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
3037 (bsg_job->reply_payload.sg_list)));
3038 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
3040 avail_dsds = 1;
3041 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
3042 index = 0;
3043 tot_dsds = bsg_job->reply_payload.sg_cnt;
3045 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3046 dma_addr_t sle_dma;
3047 cont_a64_entry_t *cont_pkt;
3049 /* Allocate additional continuation packets? */
3050 if (avail_dsds == 0) {
3051 /*
3052 * Five DSDs are available in the Cont.
3053 * Type 1 IOCB.
3054 */
3055 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3056 vha->hw->req_q_map[0]);
3057 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3058 avail_dsds = 5;
3059 entry_count++;
3060 }
3062 sle_dma = sg_dma_address(sg);
3063 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3064 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3065 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3066 loop_iteration++;
3067 avail_dsds--;
3068 }
3069 ct_iocb->entry_count = entry_count;
3071 sp->vha->qla_stats.control_requests++;
3072 }
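/*
 * Illustrative sketch: the IOCB accounting behind the DSD fill loops
 * above. The command packet itself carries 'first' descriptors; each
 * Continuation Type 1 IOCB carries five more, so the entry count grows
 * by the ceiling of the remainder divided by five.
 */
static inline uint16_t
iocbs_for_dsds(uint16_t dsds, uint16_t first)
{
	uint16_t iocbs = 1;

	if (dsds > first)
		iocbs += (dsds - first + 4) / 5;	/* ceil((dsds - first) / 5) */
	return iocbs;
}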
3074 static void
3075 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3076 {
3077 uint16_t avail_dsds;
3078 uint32_t *cur_dsd;
3079 struct scatterlist *sg;
3080 int index;
3081 uint16_t cmd_dsds, rsp_dsds;
3082 scsi_qla_host_t *vha = sp->vha;
3083 struct qla_hw_data *ha = vha->hw;
3084 struct bsg_job *bsg_job = sp->u.bsg_job;
3085 int entry_count = 1;
3086 cont_a64_entry_t *cont_pkt = NULL;
3088 ct_iocb->entry_type = CT_IOCB_TYPE;
3089 ct_iocb->entry_status = 0;
3090 ct_iocb->sys_define = 0;
3091 ct_iocb->handle = sp->handle;
3093 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3094 ct_iocb->vp_index = sp->vha->vp_idx;
3095 ct_iocb->comp_status = cpu_to_le16(0);
3097 cmd_dsds = bsg_job->request_payload.sg_cnt;
3098 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3100 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3101 ct_iocb->timeout = 0;
3102 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3103 ct_iocb->cmd_byte_count =
3104 cpu_to_le32(bsg_job->request_payload.payload_len);
3106 avail_dsds = 2;
3107 cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
3108 index = 0;
3110 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3111 dma_addr_t sle_dma;
3113 /* Allocate additional continuation packets? */
3114 if (avail_dsds == 0) {
3115 /*
3116 * Five DSDs are available in the Cont.
3117 * Type 1 IOCB.
3118 */
3119 cont_pkt = qla2x00_prep_cont_type1_iocb(
3120 vha, ha->req_q_map[0]);
3121 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3122 avail_dsds = 5;
3123 entry_count++;
3124 }
3126 sle_dma = sg_dma_address(sg);
3127 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3128 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3129 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3130 avail_dsds--;
3131 }
3133 index = 0;
3135 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3136 dma_addr_t sle_dma;
3138 /* Allocate additional continuation packets? */
3139 if (avail_dsds == 0) {
3140 /*
3141 * Five DSDs are available in the Cont.
3142 * Type 1 IOCB.
3143 */
3144 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3145 ha->req_q_map[0]);
3146 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3147 avail_dsds = 5;
3148 entry_count++;
3149 }
3151 sle_dma = sg_dma_address(sg);
3152 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3153 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3154 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3155 avail_dsds--;
3156 }
3157 ct_iocb->entry_count = entry_count;
3158 }
3160 /**
3161 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3162 * @sp: command to send to the ISP
3164 * Returns non-zero if a failure occurred, else zero.
3165 */
3166 int
3167 qla82xx_start_scsi(srb_t *sp)
3168 {
3169 int nseg;
3170 unsigned long flags;
3171 struct scsi_cmnd *cmd;
3172 uint32_t *clr_ptr;
3173 uint32_t index;
3174 uint32_t handle;
3175 uint16_t cnt;
3176 uint16_t req_cnt;
3177 uint16_t tot_dsds;
3178 struct device_reg_82xx __iomem *reg;
3179 uint32_t dbval;
3180 uint32_t *fcp_dl;
3181 uint8_t additional_cdb_len;
3182 struct ct6_dsd *ctx;
3183 struct scsi_qla_host *vha = sp->vha;
3184 struct qla_hw_data *ha = vha->hw;
3185 struct req_que *req = NULL;
3186 struct rsp_que *rsp = NULL;
3188 /* Setup device pointers. */
3189 reg = &ha->iobase->isp82;
3190 cmd = GET_CMD_SP(sp);
3191 req = vha->req;
3192 rsp = ha->rsp_q_map[0];
3194 /* So we know we haven't pci_map'ed anything yet */
3195 tot_dsds = 0;
3197 dbval = 0x04 | (ha->portnum << 5);
3199 /* Send marker if required */
3200 if (vha->marker_needed != 0) {
3201 if (qla2x00_marker(vha, ha->base_qpair,
3202 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3203 ql_log(ql_log_warn, vha, 0x300c,
3204 "qla2x00_marker failed for cmd=%p.\n", cmd);
3205 return QLA_FUNCTION_FAILED;
3206 }
3207 vha->marker_needed = 0;
3208 }
3210 /* Acquire ring specific lock */
3211 spin_lock_irqsave(&ha->hardware_lock, flags);
3213 /* Check for room in outstanding command list. */
3214 handle = req->current_outstanding_cmd;
3215 for (index = 1; index < req->num_outstanding_cmds; index++) {
3216 handle++;
3217 if (handle == req->num_outstanding_cmds)
3218 handle = 1;
3219 if (!req->outstanding_cmds[handle])
3220 break;
3221 }
3222 if (index == req->num_outstanding_cmds)
3223 goto queuing_error;
3225 /* Map the sg table so we have an accurate count of sg entries needed */
3226 if (scsi_sg_count(cmd)) {
3227 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3228 scsi_sg_count(cmd), cmd->sc_data_direction);
3229 if (unlikely(!nseg))
3230 goto queuing_error;
3231 } else
3232 nseg = 0;
3234 tot_dsds = nseg;
3236 if (tot_dsds > ql2xshiftctondsd) {
3237 struct cmd_type_6 *cmd_pkt;
3238 uint16_t more_dsd_lists = 0;
3239 struct dsd_dma *dsd_ptr;
3240 uint16_t i;
3242 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3243 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3244 ql_dbg(ql_dbg_io, vha, 0x300d,
3245 "Num of DSD list %d is than %d for cmd=%p.\n",
3246 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3251 if (more_dsd_lists <= ha->gbl_dsd_avail)
3252 goto sufficient_dsds;
3254 more_dsd_lists -= ha->gbl_dsd_avail;
3256 for (i = 0; i < more_dsd_lists; i++) {
3257 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3258 if (!dsd_ptr) {
3259 ql_log(ql_log_fatal, vha, 0x300e,
3260 "Failed to allocate memory for dsd_dma "
3261 "for cmd=%p.\n", cmd);
3262 goto queuing_error;
3263 }
3265 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3266 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3267 if (!dsd_ptr->dsd_addr) {
3268 kfree(dsd_ptr);
3269 ql_log(ql_log_fatal, vha, 0x300f,
3270 "Failed to allocate memory for dsd_addr "
3271 "for cmd=%p.\n", cmd);
3272 goto queuing_error;
3273 }
3274 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3275 ha->gbl_dsd_avail++;
3276 }
3278 sufficient_dsds:
3279 req_cnt = 1;
3281 if (req->cnt < (req_cnt + 2)) {
3282 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3283 &reg->req_q_out[0]);
3284 if (req->ring_index < cnt)
3285 req->cnt = cnt - req->ring_index;
3286 else
3287 req->cnt = req->length -
3288 (req->ring_index - cnt);
3289 if (req->cnt < (req_cnt + 2))
3290 goto queuing_error;
3291 }
3293 ctx = sp->u.scmd.ctx =
3294 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3295 if (!ctx) {
3296 ql_log(ql_log_fatal, vha, 0x3010,
3297 "Failed to allocate ctx for cmd=%p.\n", cmd);
3298 goto queuing_error;
3299 }
3301 memset(ctx, 0, sizeof(struct ct6_dsd));
3302 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3303 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3304 if (!ctx->fcp_cmnd) {
3305 ql_log(ql_log_fatal, vha, 0x3011,
3306 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3307 goto queuing_error;
3308 }
3310 /* Initialize the DSD list and dma handle */
3311 INIT_LIST_HEAD(&ctx->dsd_list);
3312 ctx->dsd_use_cnt = 0;
3314 if (cmd->cmd_len > 16) {
3315 additional_cdb_len = cmd->cmd_len - 16;
3316 if ((cmd->cmd_len % 4) != 0) {
3317 /* SCSI command bigger than 16 bytes must be
3318 * multiple of 4
3319 */
3320 ql_log(ql_log_warn, vha, 0x3012,
3321 "scsi cmd len %d not multiple of 4 "
3322 "for cmd=%p.\n", cmd->cmd_len, cmd);
3323 goto queuing_error_fcp_cmnd;
3324 }
3325 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3326 } else {
3327 additional_cdb_len = 0;
3328 ctx->fcp_cmnd_len = 12 + 16 + 4;
3329 }
3331 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3332 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3334 /* Zero out remaining portion of packet. */
3335 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3336 clr_ptr = (uint32_t *)cmd_pkt + 2;
3337 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3338 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3340 /* Set NPORT-ID and LUN number*/
3341 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3342 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3343 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3344 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3345 cmd_pkt->vp_index = sp->vha->vp_idx;
3347 /* Build IOCB segments */
3348 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3349 goto queuing_error_fcp_cmnd;
3351 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3352 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3354 /* build FCP_CMND IU */
3355 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3356 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3358 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3359 ctx->fcp_cmnd->additional_cdb_len |= 1;
3360 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3361 ctx->fcp_cmnd->additional_cdb_len |= 2;
3363 /* Populate the FCP_PRIO. */
3364 if (ha->flags.fcp_prio_enabled)
3365 ctx->fcp_cmnd->task_attribute |=
3366 sp->fcport->fcp_prio << 3;
3368 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3370 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3371 additional_cdb_len);
3372 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3374 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3375 cmd_pkt->fcp_cmnd_dseg_address[0] =
3376 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3377 cmd_pkt->fcp_cmnd_dseg_address[1] =
3378 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3380 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3381 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3382 /* Set total data segment count. */
3383 cmd_pkt->entry_count = (uint8_t)req_cnt;
3384 /* Specify response queue number where
3385 * completion should happen
3386 */
3387 cmd_pkt->entry_status = (uint8_t) rsp->id;
3388 } else {
3389 struct cmd_type_7 *cmd_pkt;
3390 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3391 if (req->cnt < (req_cnt + 2)) {
3392 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3393 &reg->req_q_out[0]);
3394 if (req->ring_index < cnt)
3395 req->cnt = cnt - req->ring_index;
3396 else
3397 req->cnt = req->length -
3398 (req->ring_index - cnt);
3400 if (req->cnt < (req_cnt + 2))
3401 goto queuing_error;
3402 }
3403 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3404 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3406 /* Zero out remaining portion of packet. */
3407 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3408 clr_ptr = (uint32_t *)cmd_pkt + 2;
3409 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3410 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3412 /* Set NPORT-ID and LUN number*/
3413 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3414 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3415 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3416 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3417 cmd_pkt->vp_index = sp->vha->vp_idx;
3419 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3420 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3421 sizeof(cmd_pkt->lun));
3423 /* Populate the FCP_PRIO. */
3424 if (ha->flags.fcp_prio_enabled)
3425 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3427 /* Load SCSI command packet. */
3428 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3429 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3431 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3433 /* Build IOCB segments */
3434 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3436 /* Set total data segment count. */
3437 cmd_pkt->entry_count = (uint8_t)req_cnt;
3438 /* Specify response queue number where
3439 * completion should happen.
3440 */
3441 cmd_pkt->entry_status = (uint8_t) rsp->id;
3442 }
3444 /* Build command packet. */
3445 req->current_outstanding_cmd = handle;
3446 req->outstanding_cmds[handle] = sp;
3447 sp->handle = handle;
3448 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3449 req->cnt -= req_cnt;
3450 wmb();
3452 /* Adjust ring index. */
3453 req->ring_index++;
3454 if (req->ring_index == req->length) {
3455 req->ring_index = 0;
3456 req->ring_ptr = req->ring;
3457 } else
3458 req->ring_ptr++;
3460 sp->flags |= SRB_DMA_VALID;
3462 /* Set chip new ring index. */
3463 /* write, read and verify logic */
3464 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3465 if (ql2xdbwr)
3466 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3467 else {
3468 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3469 wmb();
3470 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3471 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3472 wmb();
3473 }
3474 }
3476 /* Manage unprocessed RIO/ZIO commands in response queue. */
3477 if (vha->flags.process_response_queue &&
3478 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3479 qla24xx_process_response_queue(vha, rsp);
3481 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3482 return QLA_SUCCESS;
3484 queuing_error_fcp_cmnd:
3485 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3486 queuing_error:
3487 if (tot_dsds)
3488 scsi_dma_unmap(cmd);
3490 if (sp->u.scmd.ctx) {
3491 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3492 sp->u.scmd.ctx = NULL;
3493 }
3494 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3496 return QLA_FUNCTION_FAILED;
3497 }
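/*
 * Illustrative sketch: the FCP_CMND IU length computed above -- 12
 * fixed header bytes, the CDB (16 bytes, or the full length for
 * extended CDBs, which must be a multiple of 4), plus the 4-byte
 * FCP_DL field appended after the CDB.
 */
static inline uint16_t
fcp_cmnd_iu_len(unsigned int cdb_len)
{
	return 12 + (cdb_len > 16 ? cdb_len : 16) + 4;
}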
3499 static void
3500 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3501 {
3502 struct srb_iocb *aio = &sp->u.iocb_cmd;
3503 scsi_qla_host_t *vha = sp->vha;
3504 struct req_que *req = sp->qpair->req;
3506 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3507 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3508 abt_iocb->entry_count = 1;
3509 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3511 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3512 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3513 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3514 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3516 abt_iocb->handle_to_abort =
3517 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3518 aio->u.abt.cmd_hndl));
3519 abt_iocb->vp_index = vha->vp_idx;
3520 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3521 /* Send the command to the firmware */
3522 wmb();
3523 }
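/*
 * Illustrative sketch mirroring how MAKE_HANDLE() is used above: the
 * request-queue id and the per-queue command handle are composed into
 * the single 32-bit handle the firmware echoes back on completion,
 * with the queue id in the upper 16 bits.
 */
static inline uint32_t
compose_fw_handle(uint16_t que_id, uint16_t handle)
{
	return ((uint32_t)que_id << 16) | handle;
}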
3525 static void
3526 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3527 {
3528 int i, sz;
3530 mbx->entry_type = MBX_IOCB_TYPE;
3531 mbx->handle = sp->handle;
3532 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3534 for (i = 0; i < sz; i++)
3535 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3536 }
3538 static void
3539 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3540 {
3541 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3542 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3543 ct_pkt->handle = sp->handle;
3544 }
3546 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3547 struct nack_to_isp *nack)
3548 {
3549 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3551 nack->entry_type = NOTIFY_ACK_TYPE;
3552 nack->entry_count = 1;
3553 nack->ox_id = ntfy->ox_id;
3555 nack->u.isp24.handle = sp->handle;
3556 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3557 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3558 nack->u.isp24.flags = ntfy->u.isp24.flags &
3559 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3560 }
3561 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3562 nack->u.isp24.status = ntfy->u.isp24.status;
3563 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3564 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3565 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3566 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3567 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3568 nack->u.isp24.srr_flags = 0;
3569 nack->u.isp24.srr_reject_code = 0;
3570 nack->u.isp24.srr_reject_code_expl = 0;
3571 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3572 }
3574 /*
3575 * Build NVME LS request
3576 */
3577 static int
3578 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3579 {
3580 struct srb_iocb *nvme;
3581 int rval = QLA_SUCCESS;
3583 nvme = &sp->u.iocb_cmd;
3584 cmd_pkt->entry_type = PT_LS4_REQUEST;
3585 cmd_pkt->entry_count = 1;
3586 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3588 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3589 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3590 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3592 cmd_pkt->tx_dseg_count = 1;
3593 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3594 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3595 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3596 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3598 cmd_pkt->rx_dseg_count = 1;
3599 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3600 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3601 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3602 cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3604 return rval;
3605 }
3607 static void
3608 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3609 {
3610 int map, pos;
3612 vce->entry_type = VP_CTRL_IOCB_TYPE;
3613 vce->handle = sp->handle;
3614 vce->entry_count = 1;
3615 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3616 vce->vp_count = cpu_to_le16(1);
3618 /*
3619 * index map in firmware starts with 1; decrement index
3620 * this is ok as we never use index 0
3621 */
3622 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3623 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3624 vce->vp_idx_map[map] |= 1 << pos;
3625 }
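/*
 * Illustrative sketch: the firmware's VP index map is 1-based, so VP
 * index 1 lands in bit 0 of byte 0. This helper mirrors the byte/bit
 * arithmetic above.
 */
static inline void
vp_idx_map_set(uint8_t *map_bytes, uint16_t vp_index)
{
	uint16_t map = (vp_index - 1) / 8;	/* byte holding this VP's bit */
	uint16_t pos = (vp_index - 1) & 7;	/* bit within that byte */

	map_bytes[map] |= 1 << pos;
}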
3627 static void
3628 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3629 {
3630 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3631 logio->control_flags =
3632 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3634 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3635 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3636 logio->port_id[1] = sp->fcport->d_id.b.area;
3637 logio->port_id[2] = sp->fcport->d_id.b.domain;
3638 logio->vp_index = sp->fcport->vha->vp_idx;
3639 }
3641 int
3642 qla2x00_start_sp(srb_t *sp)
3643 {
3644 int rval = QLA_SUCCESS;
3645 scsi_qla_host_t *vha = sp->vha;
3646 struct qla_hw_data *ha = vha->hw;
3647 struct qla_qpair *qp = sp->qpair;
3648 void *pkt;
3649 unsigned long flags;
3651 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3652 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3653 if (!pkt) {
3654 rval = EAGAIN;
3655 ql_log(ql_log_warn, vha, 0x700c,
3656 "qla2x00_alloc_iocbs failed.\n");
3657 goto done;
3658 }
3660 switch (sp->type) {
3661 case SRB_LOGIN_CMD:
3662 IS_FWI2_CAPABLE(ha) ?
3663 qla24xx_login_iocb(sp, pkt) :
3664 qla2x00_login_iocb(sp, pkt);
3665 break;
3666 case SRB_PRLI_CMD:
3667 qla24xx_prli_iocb(sp, pkt);
3668 break;
3669 case SRB_LOGOUT_CMD:
3670 IS_FWI2_CAPABLE(ha) ?
3671 qla24xx_logout_iocb(sp, pkt) :
3672 qla2x00_logout_iocb(sp, pkt);
3673 break;
3674 case SRB_ELS_CMD_RPT:
3675 case SRB_ELS_CMD_HST:
3676 qla24xx_els_iocb(sp, pkt);
3677 break;
3678 case SRB_CT_CMD:
3679 IS_FWI2_CAPABLE(ha) ?
3680 qla24xx_ct_iocb(sp, pkt) :
3681 qla2x00_ct_iocb(sp, pkt);
3682 break;
3683 case SRB_ADISC_CMD:
3684 IS_FWI2_CAPABLE(ha) ?
3685 qla24xx_adisc_iocb(sp, pkt) :
3686 qla2x00_adisc_iocb(sp, pkt);
3687 break;
3688 case SRB_TM_CMD:
3689 IS_QLAFX00(ha) ?
3690 qlafx00_tm_iocb(sp, pkt) :
3691 qla24xx_tm_iocb(sp, pkt);
3692 break;
3693 case SRB_FXIOCB_DCMD:
3694 case SRB_FXIOCB_BCMD:
3695 qlafx00_fxdisc_iocb(sp, pkt);
3696 break;
3697 case SRB_NVME_LS:
3698 qla_nvme_ls(sp, pkt);
3699 break;
3700 case SRB_ABT_CMD:
3701 IS_QLAFX00(ha) ?
3702 qlafx00_abort_iocb(sp, pkt) :
3703 qla24xx_abort_iocb(sp, pkt);
3704 break;
3705 case SRB_ELS_DCMD:
3706 qla24xx_els_logo_iocb(sp, pkt);
3707 break;
3708 case SRB_CT_PTHRU_CMD:
3709 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3710 break;
3711 case SRB_MB_IOCB:
3712 qla2x00_mb_iocb(sp, pkt);
3713 break;
3714 case SRB_NACK_PLOGI:
3715 case SRB_NACK_PRLI:
3716 case SRB_NACK_LOGO:
3717 qla2x00_send_notify_ack_iocb(sp, pkt);
3718 break;
3719 case SRB_CTRL_VP:
3720 qla25xx_ctrlvp_iocb(sp, pkt);
3721 break;
3722 case SRB_PRLO_CMD:
3723 qla24xx_prlo_iocb(sp, pkt);
3724 break;
3725 default:
3726 break;
3727 }
3729 wmb();
3730 qla2x00_start_iocbs(vha, qp->req);
3731 done:
3732 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3733 return rval;
3734 }
3736 static void
3737 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3738 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3739 {
3740 uint16_t avail_dsds;
3741 uint32_t *cur_dsd;
3742 uint32_t req_data_len = 0;
3743 uint32_t rsp_data_len = 0;
3744 struct scatterlist *sg;
3745 int index;
3746 int entry_count = 1;
3747 struct bsg_job *bsg_job = sp->u.bsg_job;
3749 /*Update entry type to indicate bidir command */
3750 *((uint32_t *)(&cmd_pkt->entry_type)) =
3751 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3753 /* Set the transfer direction; in this case set both flags.
3754 * Also set the BD_WRAP_BACK flag; the firmware will take care of
3755 * assigning DID=SID for outgoing pkts.
3756 */
3757 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3758 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3759 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3760 BD_WRAP_BACK);
3762 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3763 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3764 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3765 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3767 vha->bidi_stats.transfer_bytes += req_data_len;
3768 vha->bidi_stats.io_count++;
3770 vha->qla_stats.output_bytes += req_data_len;
3771 vha->qla_stats.output_requests++;
3773 /* Only one dsd is available for bidirectional IOCB, remaining dsds
3774 * are bundled in continuation iocb
3775 */
3776 avail_dsds = 1;
3777 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3779 index = 0;
3781 for_each_sg(bsg_job->request_payload.sg_list, sg,
3782 bsg_job->request_payload.sg_cnt, index) {
3783 dma_addr_t sle_dma;
3784 cont_a64_entry_t *cont_pkt;
3786 /* Allocate additional continuation packets */
3787 if (avail_dsds == 0) {
3788 /* Continuation type 1 IOCB can accommodate
3789 * 5 DSDS
3790 */
3791 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3792 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3793 avail_dsds = 5;
3794 entry_count++;
3795 }
3796 sle_dma = sg_dma_address(sg);
3797 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3798 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3799 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3800 avail_dsds--;
3801 }
3802 /* For a read request the DSDs always go to the continuation IOCB
3803 * and follow the write DSDs. If there is room on the current IOCB
3804 * then it is added to that IOCB else a new continuation IOCB is
3805 * allocated.
3806 */
3807 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3808 bsg_job->reply_payload.sg_cnt, index) {
3809 dma_addr_t sle_dma;
3810 cont_a64_entry_t *cont_pkt;
3812 /* Allocate additional continuation packets */
3813 if (avail_dsds == 0) {
3814 /* Continuation type 1 IOCB can accommodate
3815 * 5 DSDS
3816 */
3817 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3818 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3819 avail_dsds = 5;
3820 entry_count++;
3821 }
3822 sle_dma = sg_dma_address(sg);
3823 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3824 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3825 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3826 avail_dsds--;
3827 }
3828 /* This value should be the same as the number of IOCBs required for this cmd */
3829 cmd_pkt->entry_count = entry_count;
3830 }
3832 int
3833 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3834 {
3836 struct qla_hw_data *ha = vha->hw;
3837 unsigned long flags;
3838 uint32_t handle;
3839 uint32_t index;
3840 uint16_t req_cnt;
3841 uint16_t cnt;
3842 uint32_t *clr_ptr;
3843 struct cmd_bidir *cmd_pkt = NULL;
3844 struct rsp_que *rsp;
3845 struct req_que *req;
3846 int rval = EXT_STATUS_OK;
3848 rval = QLA_SUCCESS;
3850 rsp = ha->rsp_q_map[0];
3851 req = vha->req;
3853 /* Send marker if required */
3854 if (vha->marker_needed != 0) {
3855 if (qla2x00_marker(vha, ha->base_qpair,
3856 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3857 return EXT_STATUS_MAILBOX;
3858 vha->marker_needed = 0;
3859 }
3861 /* Acquire ring specific lock */
3862 spin_lock_irqsave(&ha->hardware_lock, flags);
3864 /* Check for room in outstanding command list. */
3865 handle = req->current_outstanding_cmd;
3866 for (index = 1; index < req->num_outstanding_cmds; index++) {
3867 handle++;
3868 if (handle == req->num_outstanding_cmds)
3869 handle = 1;
3870 if (!req->outstanding_cmds[handle])
3871 break;
3872 }
3874 if (index == req->num_outstanding_cmds) {
3875 rval = EXT_STATUS_BUSY;
3876 goto queuing_error;
3877 }
3879 /* Calculate number of IOCBs required */
3880 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3882 /* Check for room on request queue. */
3883 if (req->cnt < req_cnt + 2) {
3884 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3885 RD_REG_DWORD_RELAXED(req->req_q_out);
3886 if (req->ring_index < cnt)
3887 req->cnt = cnt - req->ring_index;
3888 else
3889 req->cnt = req->length -
3890 (req->ring_index - cnt);
3892 if (req->cnt < req_cnt + 2) {
3893 rval = EXT_STATUS_BUSY;
3894 goto queuing_error;
3895 }
3897 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3898 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3900 /* Zero out remaining portion of packet. */
3901 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3902 clr_ptr = (uint32_t *)cmd_pkt + 2;
3903 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3905 /* Set NPORT-ID (of vha)*/
3906 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3907 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3908 cmd_pkt->port_id[1] = vha->d_id.b.area;
3909 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3911 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3912 cmd_pkt->entry_status = (uint8_t) rsp->id;
3913 /* Build command packet. */
3914 req->current_outstanding_cmd = handle;
3915 req->outstanding_cmds[handle] = sp;
3916 sp->handle = handle;
3917 req->cnt -= req_cnt;
3919 /* Send the command to the firmware */
3920 wmb();
3921 qla2x00_start_iocbs(vha, req);
3922 queuing_error:
3923 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3924 return rval;
3925 }