scsi: qla2xxx: Fix DMA error when the DIF sg buffer crosses 4GB boundary
drivers/scsi/qla2xxx/qla_iocb.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
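
/*
 * Editor's sketch (illustration only, not part of the driver): both helpers
 * above compute 1 + ceil((dsds - base) / per_cont), where the command IOCB
 * itself holds "base" DSDs (3 for Type 2, 2 for Type 3) and each
 * continuation IOCB holds "per_cont" more (7 for Type 0, 5 for Type 1).
 */
#if 0	/* hypothetical helper, never compiled */
static uint16_t calc_iocbs(uint16_t dsds, uint16_t base, uint16_t per_cont)
{
	if (dsds <= base)
		return 1;
	/* one command IOCB plus ceil((dsds - base) / per_cont) continuations */
	return 1 + (dsds - base + per_cont - 1) / per_cont;
}
/*
 * calc_iocbs(11, 3, 7) == 3, matching qla2x00_calc_iocbs_32(11): 3 DSDs in
 * the command IOCB, then 7 + 1 spread over two Continuation Type 0 IOCBs.
 */
#endif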

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF bundling for best performance. */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
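
/*
 * Editor's note: the translation above collapses the six DIF/DIX operations
 * into three firmware modes -- INSERT for *_INSERT, REMOVE for *_STRIP, and
 * PASS (or TCP checksum offload when the HBA guard is SHOST_DIX_GUARD_IP)
 * for *_PASS. A hypothetical caller, sketched under that assumption:
 */
#if 0	/* illustration only; "sp" is an assumed srb_t */
	uint16_t fw_prot_opts;
	int nr_prot_sg = qla24xx_configure_prot_mode(sp, &fw_prot_opts);

	/* nr_prot_sg is scsi_prot_sg_count(cmd); 0 means no protection SGL */
	if (nr_prot_sg)
		pr_debug("qla2xxx: prot mode %#x over %d prot segments\n",
		    fw_prot_opts, nr_prot_sg);
#endif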

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
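
/*
 * Editor's sketch: the head-room test in qla2x00_start_scsi() treats the
 * request ring as a circular buffer. With "in" = req->ring_index (producer)
 * and "out" = the OUT index read back from the chip, the recomputation of
 * req->cnt above is equivalent to:
 */
#if 0	/* hypothetical helper, never compiled */
static uint16_t ring_room(uint16_t in, uint16_t out, uint16_t length)
{
	if (in < out)			/* free slots lie between in and out */
		return out - in;
	return length - (in - out);	/* free slots wrap past the end */
}
/*
 * Note in == out yields "length" (ring treated as empty); the caller keeps
 * two entries of slack (req_cnt + 2) so the ring never fills completely.
 */
#endif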

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
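
/*
 * Editor's note: the helper above is just a ceiling division; an equivalent
 * one-liner using DIV_ROUND_UP() from <linux/kernel.h> would be:
 */
#if 0	/* illustration only */
	return DIV_ROUND_UP(dsds, QLA_DSDS_PER_IOCB);
#endif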

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For Type 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
								0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
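
/*
 * Editor's sketch: per T10 DIF, for Type 1/2 protection the 32-bit reference
 * tag is seeded from the low 32 bits of the starting LBA and increments per
 * protection interval ("match LBA in CDB + N" above), i.e. block n of a
 * transfer starting at start_lba is expected to carry:
 */
#if 0	/* hypothetical helper, never compiled */
static uint32_t expected_ref_tag(uint64_t start_lba, uint32_t n)
{
	return (uint32_t)(start_lba + n);	/* truncated to 32 bits */
}
#endif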

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
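
/*
 * Editor's sketch (hypothetical caller): the helper above is driven until it
 * returns 0, yielding one DMA chunk per call. *partial == 1 means the current
 * protection interval is still incomplete, so no DIF tuple should be emitted
 * yet -- see qla24xx_walk_and_build_sglist_no_difb() below for the real user.
 */
#if 0	/* illustration only; "cmd" is an assumed struct scsi_cmnd */
	struct qla2_sgx sgx = { 0 };
	uint32_t partial;

	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	while (qla24xx_get_one_block_sg(cmd->device->sector_size, &sgx,
	    &partial)) {
		/* consume sgx.dma_addr .. sgx.dma_addr + sgx.dma_len - 1 */
	}
#endif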

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	uint32_t	prot_int; /* protection interval */
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int      = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg    = tc->sg;
		sg_prot       = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
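
/*
 * Editor's note on the chained-DSD layout built by the two walkers above:
 * each DSD is a 12-byte {addr_lo, addr_hi, len} triple, and every list is
 * sized with one spare slot ((avail_dsds + 1) * 12) so its final entry can
 * point at the next list; the chain is closed with a zeroed DSD:
 *
 *   cmd IOCB --> [dsd][dsd]...[link] --> [dsd][dsd]...[0,0,0]
 */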
1098
1099 int
1100 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1101     uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1102 {
1103         struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1104         struct scatterlist *sg, *sgl;
1105         struct crc_context *difctx = NULL;
1106         struct scsi_qla_host *vha;
1107         uint dsd_list_len;
1108         uint avail_dsds = 0;
1109         uint used_dsds = tot_dsds;
1110         bool dif_local_dma_alloc = false;
1111         bool direction_to_device = false;
1112         int i;
1113
1114         if (sp) {
1115                 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1116                 sgl = scsi_prot_sglist(cmd);
1117                 vha = sp->vha;
1118                 difctx = sp->u.scmd.ctx;
1119                 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1120                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1121                   "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1122                         __func__, cmd, difctx, sp);
1123         } else if (tc) {
1124                 vha = tc->vha;
1125                 sgl = tc->prot_sg;
1126                 difctx = tc->ctx;
1127                 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1128         } else {
1129                 BUG();
1130                 return 1;
1131         }
1132
1133         ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1134             "%s: enter (write=%u)\n", __func__, direction_to_device);
1135
1136         /* if initiator doing write or target doing read */
1137         if (direction_to_device) {
1138                 for_each_sg(sgl, sg, tot_dsds, i) {
1139                         dma_addr_t sle_phys = sg_phys(sg);
1140
1141                         /* If SGE addr + len flips bits in upper 32-bits */
1142                         if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1143                                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1144                                     "%s: page boundary crossing (phys=%llx len=%x)\n",
1145                                     __func__, sle_phys, sg->length);
1146
1147                                 if (difctx) {
1148                                         ha->dif_bundle_crossed_pages++;
1149                                         dif_local_dma_alloc = true;
1150                                 } else {
1151                                         ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1152                                             vha, 0xe022,
1153                                             "%s: difctx pointer is NULL\n",
1154                                             __func__);
1155                                 }
1156                                 break;
1157                         }
1158                 }
1159                 ha->dif_bundle_writes++;
1160         } else {
1161                 ha->dif_bundle_reads++;
1162         }
1163
1164         if (ql2xdifbundlinginternalbuffers)
1165                 dif_local_dma_alloc = direction_to_device;
1166
1167         if (dif_local_dma_alloc) {
1168                 u32 track_difbundl_buf = 0;
1169                 u32 ldma_sg_len = 0;
1170                 u8 ldma_needed = 1;
1171
1172                 difctx->no_dif_bundl = 0;
1173                 difctx->dif_bundl_len = 0;
1174
1175                 /* Track DSD buffers */
1176                 INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1177                 /* Track local DMA buffers */
1178                 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1179
1180                 for_each_sg(sgl, sg, tot_dsds, i) {
1181                         u32 sglen = sg_dma_len(sg);
1182
1183                         ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1184                             "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1185                             __func__, i, sg_phys(sg), sglen, ldma_sg_len,
1186                             difctx->dif_bundl_len, ldma_needed);
1187
1188                         while (sglen) {
1189                                 u32 xfrlen = 0;
1190
1191                                 if (ldma_needed) {
1192                                         /*
1193                                          * Allocate list item to store
1194                                          * the DMA buffers
1195                                          */
1196                                         dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1197                                             GFP_ATOMIC);
1198                                         if (!dsd_ptr) {
1199                                                 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1200                                                     "%s: failed alloc dsd_ptr\n",
1201                                                     __func__);
1202                                                 return 1;
1203                                         }
1204                                         ha->dif_bundle_kallocs++;
1205
1206                                         /* allocate dma buffer */
1207                                         dsd_ptr->dsd_addr = dma_pool_alloc
1208                                                 (ha->dif_bundl_pool, GFP_ATOMIC,
1209                                                  &dsd_ptr->dsd_list_dma);
1210                                         if (!dsd_ptr->dsd_addr) {
1211                                                 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1212                                                     "%s: failed alloc ->dsd_ptr\n",
1213                                                     __func__);
1214                                                 /*
1215                                                  * need to cleanup only this
1216                                                  * dsd_ptr rest will be done
1217                                                  * by sp_free_dma()
1218                                                  */
1219                                                 kfree(dsd_ptr);
1220                                                 ha->dif_bundle_kallocs--;
1221                                                 return 1;
1222                                         }
1223                                         ha->dif_bundle_dma_allocs++;
1224                                         ldma_needed = 0;
1225                                         difctx->no_dif_bundl++;
1226                                         list_add_tail(&dsd_ptr->list,
1227                                             &difctx->ldif_dma_hndl_list);
1228                                 }
1229
1230                                 /* xfrlen is min of dma pool size and sglen */
1231                                 xfrlen = (sglen >
1232                                    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1233                                     DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1234                                     sglen;
1235
1236                                 /* replace with local allocated dma buffer */
1237                                 sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1238                                     dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1239                                     difctx->dif_bundl_len);
1240                                 difctx->dif_bundl_len += xfrlen;
1241                                 sglen -= xfrlen;
1242                                 ldma_sg_len += xfrlen;
1243                                 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1244                                     sg_is_last(sg)) {
1245                                         ldma_needed = 1;
1246                                         ldma_sg_len = 0;
1247                                 }
1248                         }
1249                 }
1250
1251                 track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1252                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1253                     "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1254                     difctx->dif_bundl_len, difctx->no_dif_bundl,
1255                     track_difbundl_buf);
1256
1257                 if (sp)
1258                         sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1259                 else
1260                         tc->prot_flags = DIF_BUNDL_DMA_VALID;
1261
1262                 list_for_each_entry_safe(dif_dsd, nxt_dsd,
1263                     &difctx->ldif_dma_hndl_list, list) {
1264                         u32 sglen = (difctx->dif_bundl_len >
1265                             DIF_BUNDLING_DMA_POOL_SIZE) ?
1266                             DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1267
1268                         BUG_ON(track_difbundl_buf == 0);
1269
1270                         /* Allocate additional continuation packets? */
1271                         if (avail_dsds == 0) {
1272                                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1273                                     0xe024,
1274                                     "%s: adding continuation iocb's\n",
1275                                     __func__);
1276                                 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1277                                     QLA_DSDS_PER_IOCB : used_dsds;
1278                                 dsd_list_len = (avail_dsds + 1) * 12;
1279                                 used_dsds -= avail_dsds;
1280
1281                                 /* allocate tracking DS */
1282                                 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1283                                 if (!dsd_ptr) {
1284                                         ql_dbg(ql_dbg_tgt, vha, 0xe026,
1285                                             "%s: failed alloc dsd_ptr\n",
1286                                             __func__);
1287                                         return 1;
1288                                 }
1289                                 ha->dif_bundle_kallocs++;
1290
1291                                 difctx->no_ldif_dsd++;
1292                                 /* allocate new list */
1293                                 dsd_ptr->dsd_addr =
1294                                     dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1295                                         &dsd_ptr->dsd_list_dma);
1296                                 if (!dsd_ptr->dsd_addr) {
1297                                         ql_dbg(ql_dbg_tgt, vha, 0xe026,
1298                                             "%s: failed alloc ->dsd_addr\n",
1299                                             __func__);
1300                                         /*
1301                                          * need to cleanup only this dsd_ptr
1302                                          *  rest will be done by sp_free_dma()
1303                                          */
1304                                         kfree(dsd_ptr);
1305                                         ha->dif_bundle_kallocs--;
1306                                         return 1;
1307                                 }
1308                                 ha->dif_bundle_dma_allocs++;
1309
1310                                 if (sp) {
1311                                         list_add_tail(&dsd_ptr->list,
1312                                             &difctx->ldif_dsd_list);
1313                                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
1314                                 } else {
1315                                         list_add_tail(&dsd_ptr->list,
1316                                             &difctx->ldif_dsd_list);
1317                                         tc->ctx_dsd_alloced = 1;
1318                                 }
1319
1320                                 /* add new list to cmd iocb or last list */
1321                                 *cur_dsd++ =
1322                                     cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1323                                 *cur_dsd++ =
1324                                     cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1325                                 *cur_dsd++ = dsd_list_len;
1326                                 cur_dsd = dsd_ptr->dsd_addr;
1327                         }
1328                         *cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma));
1329                         *cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma));
1330                         *cur_dsd++ = cpu_to_le32(sglen);
1331                         avail_dsds--;
1332                         difctx->dif_bundl_len -= sglen;
1333                         track_difbundl_buf--;
1334                 }
1335
1336                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1337                     "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1338                         difctx->no_ldif_dsd, difctx->no_dif_bundl);
1339         } else {
1340                 for_each_sg(sgl, sg, tot_dsds, i) {
1341                         dma_addr_t sle_dma;
1342
1343                         /* Allocate additional continuation packets? */
1344                         if (avail_dsds == 0) {
1345                                 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1346                                     QLA_DSDS_PER_IOCB : used_dsds;
1347                                 dsd_list_len = (avail_dsds + 1) * 12;
1348                                 used_dsds -= avail_dsds;
1349
1350                                 /* allocate tracking DS */
1351                                 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1352                                 if (!dsd_ptr) {
1353                                         ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1354                                             vha, 0xe027,
1355                                             "%s: failed alloc dsd_dma...\n",
1356                                             __func__);
1357                                         return 1;
1358                                 }
1359
1360                                 /* allocate new list */
1361                                 dsd_ptr->dsd_addr =
1362                                     dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1363                                         &dsd_ptr->dsd_list_dma);
1364                                 if (!dsd_ptr->dsd_addr) {
1365                                         /* need to cleanup only this dsd_ptr */
1366                                         /* rest will be done by sp_free_dma() */
1367                                         kfree(dsd_ptr);
1368                                         return 1;
1369                                 }
1370
1371                                 if (sp) {
1372                                         list_add_tail(&dsd_ptr->list,
1373                                             &difctx->dsd_list);
1374                                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
1375                                 } else {
1376                                         list_add_tail(&dsd_ptr->list,
1377                                             &difctx->dsd_list);
1378                                         tc->ctx_dsd_alloced = 1;
1379                                 }
1380
1381                                 /* add new list to cmd iocb or last list */
1382                                 *cur_dsd++ =
1383                                     cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1384                                 *cur_dsd++ =
1385                                     cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1386                                 *cur_dsd++ = dsd_list_len;
1387                                 cur_dsd = dsd_ptr->dsd_addr;
1388                         }
1389                         sle_dma = sg_dma_address(sg);
1390                         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1391                         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1392                         *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1393                         avail_dsds--;
1394                 }
1395         }
1396         /* Null termination */
1397         *cur_dsd++ = 0;
1398         *cur_dsd++ = 0;
1399         *cur_dsd++ = 0;
1400         return 0;
1401 }
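
/*
 * A note on the DSD layout the walk functions above follow (a sketch of
 * the convention visible in the code, not new driver behaviour): each
 * data segment descriptor is three little-endian 32-bit words -- address
 * LSD, address MSD, byte length -- i.e. 12 bytes.  When the inline slots
 * run out, a fresh list is carved from dl_dma_pool and the next three
 * words are written as a pointer entry (list address plus dsd_list_len)
 * that chains to it.  dsd_list_len is computed as (avail_dsds + 1) * 12
 * so every list reserves one extra slot for the chain entry to the list
 * that may follow it.
 */
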
1402 /**
1403  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1404  *                                    Type CRC_2 IOCB entries.
1405  *
1406  * @sp: SRB command to process
1407  * @cmd_pkt: Command type CRC_2 IOCB
1408  * @tot_dsds: Total number of segments to transfer
1409  * @tot_prot_dsds: Total number of segments with protection information
1410  * @fw_prot_opts: Protection options to be passed to firmware
1411  */
1412 inline int
1413 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1414     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1415 {
1416         uint32_t                *cur_dsd, *fcp_dl;
1417         scsi_qla_host_t         *vha;
1418         struct scsi_cmnd        *cmd;
1419         uint32_t                total_bytes = 0;
1420         uint32_t                data_bytes;
1421         uint32_t                dif_bytes;
1422         uint8_t                 bundling = 1;
1423         uint16_t                blk_size;
1424         struct crc_context      *crc_ctx_pkt = NULL;
1425         struct qla_hw_data      *ha;
1426         uint8_t                 additional_fcpcdb_len;
1427         uint16_t                fcp_cmnd_len;
1428         struct fcp_cmnd         *fcp_cmnd;
1429         dma_addr_t              crc_ctx_dma;
1430
1431         cmd = GET_CMD_SP(sp);
1432
1433         /* Update entry type to indicate Command Type CRC_2 IOCB */
1434         *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
1435
1436         vha = sp->vha;
1437         ha = vha->hw;
1438
1439         /* No data transfer */
1440         data_bytes = scsi_bufflen(cmd);
1441         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1442                 cmd_pkt->byte_count = cpu_to_le32(0);
1443                 return QLA_SUCCESS;
1444         }
1445
1446         cmd_pkt->vp_index = sp->vha->vp_idx;
1447
1448         /* Set transfer direction */
1449         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1450                 cmd_pkt->control_flags =
1451                     cpu_to_le16(CF_WRITE_DATA);
1452         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1453                 cmd_pkt->control_flags =
1454                     cpu_to_le16(CF_READ_DATA);
1455         }
1456
1457         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1458             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1459             (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1460             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1461                 bundling = 0;
1462
1463         /* Allocate CRC context from global pool */
1464         crc_ctx_pkt = sp->u.scmd.ctx =
1465             dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1466
1467         if (!crc_ctx_pkt)
1468                 goto crc_queuing_error;
1469
1470         crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1471
1472         sp->flags |= SRB_CRC_CTX_DMA_VALID;
1473
1474         /* Set handle */
1475         crc_ctx_pkt->handle = cmd_pkt->handle;
1476
1477         INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1478
1479         qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1480             &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1481
1482         cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1483         cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1484         cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1485
1486         /* Determine SCSI command length -- align to 4 byte boundary */
1487         if (cmd->cmd_len > 16) {
1488                 additional_fcpcdb_len = cmd->cmd_len - 16;
1489                 if ((cmd->cmd_len % 4) != 0) {
1490                         /* SCSI cmd > 16 bytes must be multiple of 4 */
1491                         goto crc_queuing_error;
1492                 }
1493                 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1494         } else {
1495                 additional_fcpcdb_len = 0;
1496                 fcp_cmnd_len = 12 + 16 + 4;
1497         }
1498
1499         fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1500
1501         fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1502         if (cmd->sc_data_direction == DMA_TO_DEVICE)
1503                 fcp_cmnd->additional_cdb_len |= 1;
1504         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1505                 fcp_cmnd->additional_cdb_len |= 2;
1506
1507         int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1508         memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1509         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1510         cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1511             LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1512         cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1513             MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1514         fcp_cmnd->task_management = 0;
1515         fcp_cmnd->task_attribute = TSK_SIMPLE;
1516
1517         cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1518
1519         /* Compute dif len and adjust data len to include protection */
1520         dif_bytes = 0;
1521         blk_size = cmd->device->sector_size;
1522         dif_bytes = (data_bytes / blk_size) * 8;
1523
1524         switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1525         case SCSI_PROT_READ_INSERT:
1526         case SCSI_PROT_WRITE_STRIP:
1527             total_bytes = data_bytes;
1528             data_bytes += dif_bytes;
1529             break;
1530
1531         case SCSI_PROT_READ_STRIP:
1532         case SCSI_PROT_WRITE_INSERT:
1533         case SCSI_PROT_READ_PASS:
1534         case SCSI_PROT_WRITE_PASS:
1535             total_bytes = data_bytes + dif_bytes;
1536             break;
1537         default:
1538             BUG();
1539         }
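
        /*
         * Worked example of the arithmetic above (illustrative numbers
         * only): a 4096-byte transfer on a 512-byte-sector device spans
         * 4096 / 512 = 8 blocks, each carrying 8 bytes of protection, so
         * dif_bytes = 8 * 8 = 64.  For a pass-through op (e.g.
         * SCSI_PROT_WRITE_PASS) the wire total is 4096 + 64 = 4160; for
         * READ_INSERT/WRITE_STRIP the wire total stays 4096 and only the
         * host-side DMA length (data_bytes) grows by dif_bytes.
         */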
1540
1541         if (!qla2x00_hba_err_chk_enabled(sp))
1542                 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1543         else if (IS_PI_UNINIT_CAPABLE(ha)) {
1544                 /* HBA error checking enabled */
1545                 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1546                     || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1547                         SCSI_PROT_DIF_TYPE2))
1548                         fw_prot_opts |= BIT_10;
1549                 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1550                     SCSI_PROT_DIF_TYPE3)
1551                         fw_prot_opts |= BIT_11;
1552         }
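        /*
         * Summary of the fw_prot_opts bits set above: 0x10 tells the
         * firmware to skip guard-tag checking when the HBA is not doing
         * error checks; on PI "uninit"-capable adapters, BIT_10 (DIF
         * Type 1/2) or BIT_11 (Type 3) selects how uninitialized
         * protection blocks are treated (an inference from the
         * IS_PI_UNINIT_CAPABLE gate -- the exact escape semantics are
         * firmware-defined and not spelled out here).
         */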
1553
1554         if (!bundling) {
1555                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1556         } else {
1557                 /*
1558                  * Configure bundling so that data and interleaved
1559                  * protection are fetched with separate PCI accesses.
1560                  */
1561                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1562                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1563                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1564                                                         tot_prot_dsds);
1565                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1566         }
1567
1568         /* Finish the common fields of CRC pkt */
1569         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1570         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1571         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1572         crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1573         /* Fibre channel byte count */
1574         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1575         fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1576             additional_fcpcdb_len);
1577         *fcp_dl = htonl(total_bytes);
1578
1579         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1580                 cmd_pkt->byte_count = cpu_to_le32(0);
1581                 return QLA_SUCCESS;
1582         }
1583         /* Walks data segments */
1584
1585         cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1586
1587         if (!bundling && tot_prot_dsds) {
1588                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1589                         cur_dsd, tot_dsds, NULL))
1590                         goto crc_queuing_error;
1591         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1592                         (tot_dsds - tot_prot_dsds), NULL))
1593                 goto crc_queuing_error;
1594
1595         if (bundling && tot_prot_dsds) {
1596                 /* Walks dif segments */
1597                 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1598                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1599                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1600                                 tot_prot_dsds, NULL))
1601                         goto crc_queuing_error;
1602         }
1603         return QLA_SUCCESS;
1604
1605 crc_queuing_error:
1606         /* Cleanup will be performed by the caller */
1607
1608         return QLA_FUNCTION_FAILED;
1609 }
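
/*
 * Byte-order note for the function above: fcp_dl lives immediately after
 * the CDB inside the FCP_CMND payload (offset 16 + additional_fcpcdb_len)
 * and, per FCP, is big-endian -- hence the htonl() -- while the
 * surrounding IOCB fields are little-endian (cpu_to_le*) for the ISP.
 */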
1610
1611 /**
1612  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1613  * @sp: command to send to the ISP
1614  *
1615  * Returns non-zero if a failure occurred, else zero.
1616  */
1617 int
1618 qla24xx_start_scsi(srb_t *sp)
1619 {
1620         int             nseg;
1621         unsigned long   flags;
1622         uint32_t        *clr_ptr;
1623         uint32_t        index;
1624         uint32_t        handle;
1625         struct cmd_type_7 *cmd_pkt;
1626         uint16_t        cnt;
1627         uint16_t        req_cnt;
1628         uint16_t        tot_dsds;
1629         struct req_que *req = NULL;
1630         struct rsp_que *rsp = NULL;
1631         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1632         struct scsi_qla_host *vha = sp->vha;
1633         struct qla_hw_data *ha = vha->hw;
1634
1635         /* Setup device pointers. */
1636         req = vha->req;
1637         rsp = req->rsp;
1638
1639         /* So we know we haven't pci_map'ed anything yet */
1640         tot_dsds = 0;
1641
1642         /* Send marker if required */
1643         if (vha->marker_needed != 0) {
1644                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1645                     QLA_SUCCESS)
1646                         return QLA_FUNCTION_FAILED;
1647                 vha->marker_needed = 0;
1648         }
1649
1650         /* Acquire ring specific lock */
1651         spin_lock_irqsave(&ha->hardware_lock, flags);
1652
1653         /* Check for room in outstanding command list. */
1654         handle = req->current_outstanding_cmd;
1655         for (index = 1; index < req->num_outstanding_cmds; index++) {
1656                 handle++;
1657                 if (handle == req->num_outstanding_cmds)
1658                         handle = 1;
1659                 if (!req->outstanding_cmds[handle])
1660                         break;
1661         }
1662         if (index == req->num_outstanding_cmds)
1663                 goto queuing_error;
1664
1665         /* Map the sg table so we have an accurate count of sg entries needed */
1666         if (scsi_sg_count(cmd)) {
1667                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1668                     scsi_sg_count(cmd), cmd->sc_data_direction);
1669                 if (unlikely(!nseg))
1670                         goto queuing_error;
1671         } else
1672                 nseg = 0;
1673
1674         tot_dsds = nseg;
1675         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1676         if (req->cnt < (req_cnt + 2)) {
1677                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1678                     RD_REG_DWORD_RELAXED(req->req_q_out);
1679                 if (req->ring_index < cnt)
1680                         req->cnt = cnt - req->ring_index;
1681                 else
1682                         req->cnt = req->length -
1683                                 (req->ring_index - cnt);
1684                 if (req->cnt < (req_cnt + 2))
1685                         goto queuing_error;
1686         }
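        /*
         * Free-slot arithmetic above, with illustrative numbers: if the
         * ring holds req->length = 2048 entries, the driver's producer
         * index is ring_index = 2000 and the chip's consumer index reads
         * cnt = 100, then free entries = 2048 - (2000 - 100) = 148.  The
         * two entries of headroom in "req_cnt + 2" keep the producer from
         * landing exactly on the consumer, where a completely full ring
         * would be indistinguishable from an empty one.
         */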
1687
1688         /* Build command packet. */
1689         req->current_outstanding_cmd = handle;
1690         req->outstanding_cmds[handle] = sp;
1691         sp->handle = handle;
1692         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1693         req->cnt -= req_cnt;
1694
1695         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1696         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1697
1698         /* Zero out remaining portion of packet. */
1699         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1700         clr_ptr = (uint32_t *)cmd_pkt + 2;
1701         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1702         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1703
1704         /* Set NPORT-ID and LUN number*/
1705         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1706         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1707         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1708         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1709         cmd_pkt->vp_index = sp->vha->vp_idx;
1710
1711         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1712         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1713
1714         cmd_pkt->task = TSK_SIMPLE;
1715
1716         /* Load SCSI command packet. */
1717         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1718         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1719
1720         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1721
1722         /* Build IOCB segments */
1723         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1724
1725         /* Set total data segment count. */
1726         cmd_pkt->entry_count = (uint8_t)req_cnt;
1727         wmb();
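        /*
         * The wmb() above makes all IOCB stores visible before the
         * req_q_in doorbell write below, so the ISP cannot fetch a
         * half-built packet.
         */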
1728         /* Adjust ring index. */
1729         req->ring_index++;
1730         if (req->ring_index == req->length) {
1731                 req->ring_index = 0;
1732                 req->ring_ptr = req->ring;
1733         } else
1734                 req->ring_ptr++;
1735
1736         sp->flags |= SRB_DMA_VALID;
1737
1738         /* Set chip new ring index. */
1739         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1740
1741         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1742         return QLA_SUCCESS;
1743
1744 queuing_error:
1745         if (tot_dsds)
1746                 scsi_dma_unmap(cmd);
1747
1748         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1749
1750         return QLA_FUNCTION_FAILED;
1751 }
1752
1753 /**
1754  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1755  * @sp: command to send to the ISP
1756  *
1757  * Returns non-zero if a failure occurred, else zero.
1758  */
1759 int
1760 qla24xx_dif_start_scsi(srb_t *sp)
1761 {
1762         int                     nseg;
1763         unsigned long           flags;
1764         uint32_t                *clr_ptr;
1765         uint32_t                index;
1766         uint32_t                handle;
1767         uint16_t                cnt;
1768         uint16_t                req_cnt = 0;
1769         uint16_t                tot_dsds;
1770         uint16_t                tot_prot_dsds;
1771         uint16_t                fw_prot_opts = 0;
1772         struct req_que          *req = NULL;
1773         struct rsp_que          *rsp = NULL;
1774         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1775         struct scsi_qla_host    *vha = sp->vha;
1776         struct qla_hw_data      *ha = vha->hw;
1777         struct cmd_type_crc_2   *cmd_pkt;
1778         uint32_t                status = 0;
1779
1780 #define QDSS_GOT_Q_SPACE        BIT_0
1781
1782         /* Only process protection ops or CDBs longer than 16 bytes here */
1783         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1784                 if (cmd->cmd_len <= 16)
1785                         return qla24xx_start_scsi(sp);
1786         }
1787
1788         /* Setup device pointers. */
1789         req = vha->req;
1790         rsp = req->rsp;
1791
1792         /* So we know we haven't pci_map'ed anything yet */
1793         tot_dsds = 0;
1794
1795         /* Send marker if required */
1796         if (vha->marker_needed != 0) {
1797                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1798                     QLA_SUCCESS)
1799                         return QLA_FUNCTION_FAILED;
1800                 vha->marker_needed = 0;
1801         }
1802
1803         /* Acquire ring specific lock */
1804         spin_lock_irqsave(&ha->hardware_lock, flags);
1805
1806         /* Check for room in outstanding command list. */
1807         handle = req->current_outstanding_cmd;
1808         for (index = 1; index < req->num_outstanding_cmds; index++) {
1809                 handle++;
1810                 if (handle == req->num_outstanding_cmds)
1811                         handle = 1;
1812                 if (!req->outstanding_cmds[handle])
1813                         break;
1814         }
1815
1816         if (index == req->num_outstanding_cmds)
1817                 goto queuing_error;
1818
1819         /* Compute number of required data segments */
1820         /* Map the sg table so we have an accurate count of sg entries needed */
1821         if (scsi_sg_count(cmd)) {
1822                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1823                     scsi_sg_count(cmd), cmd->sc_data_direction);
1824                 if (unlikely(!nseg))
1825                         goto queuing_error;
1826                 else
1827                         sp->flags |= SRB_DMA_VALID;
1828
1829                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1830                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1831                         struct qla2_sgx sgx;
1832                         uint32_t        partial;
1833
1834                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1835                         sgx.tot_bytes = scsi_bufflen(cmd);
1836                         sgx.cur_sg = scsi_sglist(cmd);
1837                         sgx.sp = sp;
1838
1839                         nseg = 0;
1840                         while (qla24xx_get_one_block_sg(
1841                             cmd->device->sector_size, &sgx, &partial))
1842                                 nseg++;
1843                 }
1844         } else
1845                 nseg = 0;
1846
1847         /* number of required data segments */
1848         tot_dsds = nseg;
1849
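        /*
         * Why the recount above: for READ_INSERT/WRITE_STRIP the firmware
         * handles protection per block, so each sector is described by
         * its own DSD; qla24xx_get_one_block_sg() re-walks the sg list in
         * sector_size chunks.  E.g. a single 4 KB sg element on a
         * 512-byte-sector device becomes 8 DSDs, not 1.
         */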
1850         /* Compute number of required protection segments */
1851         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1852                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1853                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1854                 if (unlikely(!nseg))
1855                         goto queuing_error;
1856                 else
1857                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1858
1859                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1860                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1861                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1862                 }
1863         } else {
1864                 nseg = 0;
1865         }
1866
1867         req_cnt = 1;
1868         /* Total Data and protection sg segment(s) */
1869         tot_prot_dsds = nseg;
1870         tot_dsds += nseg;
1871         if (req->cnt < (req_cnt + 2)) {
1872                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1873                     RD_REG_DWORD_RELAXED(req->req_q_out);
1874                 if (req->ring_index < cnt)
1875                         req->cnt = cnt - req->ring_index;
1876                 else
1877                         req->cnt = req->length -
1878                                 (req->ring_index - cnt);
1879                 if (req->cnt < (req_cnt + 2))
1880                         goto queuing_error;
1881         }
1882
1883         status |= QDSS_GOT_Q_SPACE;
1884
1885         /* Build header part of command packet (excluding the OPCODE). */
1886         req->current_outstanding_cmd = handle;
1887         req->outstanding_cmds[handle] = sp;
1888         sp->handle = handle;
1889         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1890         req->cnt -= req_cnt;
1891
1892         /* Fill-in common area */
1893         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1894         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1895
1896         clr_ptr = (uint32_t *)cmd_pkt + 2;
1897         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1898
1899         /* Set NPORT-ID and LUN number*/
1900         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1901         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1902         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1903         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1904
1905         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1906         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1907
1908         /* Total Data and protection segment(s) */
1909         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1910
1911         /* Build IOCB segments and adjust for data protection segments */
1912         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1913             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1914                 QLA_SUCCESS)
1915                 goto queuing_error;
1916
1917         cmd_pkt->entry_count = (uint8_t)req_cnt;
1918         /* Specify response queue number where completion should happen */
1919         cmd_pkt->entry_status = (uint8_t) rsp->id;
1920         cmd_pkt->timeout = cpu_to_le16(0);
1921         wmb();
1922
1923         /* Adjust ring index. */
1924         req->ring_index++;
1925         if (req->ring_index == req->length) {
1926                 req->ring_index = 0;
1927                 req->ring_ptr = req->ring;
1928         } else
1929                 req->ring_ptr++;
1930
1931         /* Set chip new ring index. */
1932         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1933
1934         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1935
1936         return QLA_SUCCESS;
1937
1938 queuing_error:
1939         if (status & QDSS_GOT_Q_SPACE) {
1940                 req->outstanding_cmds[handle] = NULL;
1941                 req->cnt += req_cnt;
1942         }
1943         /* Cleanup will be performed by the caller (queuecommand) */
1944
1945         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1946         return QLA_FUNCTION_FAILED;
1947 }
1948
1949 /**
1950  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1951  * @sp: command to send to the ISP
1952  *
1953  * Returns non-zero if a failure occurred, else zero.
1954  */
1955 static int
1956 qla2xxx_start_scsi_mq(srb_t *sp)
1957 {
1958         int             nseg;
1959         unsigned long   flags;
1960         uint32_t        *clr_ptr;
1961         uint32_t        index;
1962         uint32_t        handle;
1963         struct cmd_type_7 *cmd_pkt;
1964         uint16_t        cnt;
1965         uint16_t        req_cnt;
1966         uint16_t        tot_dsds;
1967         struct req_que *req = NULL;
1968         struct rsp_que *rsp = NULL;
1969         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1970         struct scsi_qla_host *vha = sp->fcport->vha;
1971         struct qla_hw_data *ha = vha->hw;
1972         struct qla_qpair *qpair = sp->qpair;
1973
1974         /* Acquire qpair specific lock */
1975         spin_lock_irqsave(&qpair->qp_lock, flags);
1976
1977         /* Setup qpair pointers */
1978         rsp = qpair->rsp;
1979         req = qpair->req;
1980
1981         /* So we know we haven't pci_map'ed anything yet */
1982         tot_dsds = 0;
1983
1984         /* Send marker if required */
1985         if (vha->marker_needed != 0) {
1986                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1987                     QLA_SUCCESS) {
1988                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1989                         return QLA_FUNCTION_FAILED;
1990                 }
1991                 vha->marker_needed = 0;
1992         }
1993
1994         /* Check for room in outstanding command list. */
1995         handle = req->current_outstanding_cmd;
1996         for (index = 1; index < req->num_outstanding_cmds; index++) {
1997                 handle++;
1998                 if (handle == req->num_outstanding_cmds)
1999                         handle = 1;
2000                 if (!req->outstanding_cmds[handle])
2001                         break;
2002         }
2003         if (index == req->num_outstanding_cmds)
2004                 goto queuing_error;
2005
2006         /* Map the sg table so we have an accurate count of sg entries needed */
2007         if (scsi_sg_count(cmd)) {
2008                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2009                     scsi_sg_count(cmd), cmd->sc_data_direction);
2010                 if (unlikely(!nseg))
2011                         goto queuing_error;
2012         } else
2013                 nseg = 0;
2014
2015         tot_dsds = nseg;
2016         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2017         if (req->cnt < (req_cnt + 2)) {
2018                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2019                     RD_REG_DWORD_RELAXED(req->req_q_out);
2020                 if (req->ring_index < cnt)
2021                         req->cnt = cnt - req->ring_index;
2022                 else
2023                         req->cnt = req->length -
2024                                 (req->ring_index - cnt);
2025                 if (req->cnt < (req_cnt + 2))
2026                         goto queuing_error;
2027         }
2028
2029         /* Build command packet. */
2030         req->current_outstanding_cmd = handle;
2031         req->outstanding_cmds[handle] = sp;
2032         sp->handle = handle;
2033         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2034         req->cnt -= req_cnt;
2035
2036         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2037         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2038
2039         /* Zero out remaining portion of packet. */
2040         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2041         clr_ptr = (uint32_t *)cmd_pkt + 2;
2042         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2043         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2044
2045         /* Set NPORT-ID and LUN number*/
2046         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2047         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2048         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2049         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2050         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2051
2052         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2053         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2054
2055         cmd_pkt->task = TSK_SIMPLE;
2056
2057         /* Load SCSI command packet. */
2058         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2059         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2060
2061         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2062
2063         /* Build IOCB segments */
2064         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2065
2066         /* Set total data segment count. */
2067         cmd_pkt->entry_count = (uint8_t)req_cnt;
2068         wmb();
2069         /* Adjust ring index. */
2070         req->ring_index++;
2071         if (req->ring_index == req->length) {
2072                 req->ring_index = 0;
2073                 req->ring_ptr = req->ring;
2074         } else
2075                 req->ring_ptr++;
2076
2077         sp->flags |= SRB_DMA_VALID;
2078
2079         /* Set chip new ring index. */
2080         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2081
2082         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2083         return QLA_SUCCESS;
2084
2085 queuing_error:
2086         if (tot_dsds)
2087                 scsi_dma_unmap(cmd);
2088
2089         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2090
2091         return QLA_FUNCTION_FAILED;
2092 }
2093
2094
2095 /**
2096  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2097  * @sp: command to send to the ISP
2098  *
2099  * Returns non-zero if a failure occurred, else zero.
2100  */
2101 int
2102 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2103 {
2104         int                     nseg;
2105         unsigned long           flags;
2106         uint32_t                *clr_ptr;
2107         uint32_t                index;
2108         uint32_t                handle;
2109         uint16_t                cnt;
2110         uint16_t                req_cnt = 0;
2111         uint16_t                tot_dsds;
2112         uint16_t                tot_prot_dsds;
2113         uint16_t                fw_prot_opts = 0;
2114         struct req_que          *req = NULL;
2115         struct rsp_que          *rsp = NULL;
2116         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2117         struct scsi_qla_host    *vha = sp->fcport->vha;
2118         struct qla_hw_data      *ha = vha->hw;
2119         struct cmd_type_crc_2   *cmd_pkt;
2120         uint32_t                status = 0;
2121         struct qla_qpair        *qpair = sp->qpair;
2122
2123 #define QDSS_GOT_Q_SPACE        BIT_0
2124
2125         /* Check for host side state */
2126         if (!qpair->online) {
2127                 cmd->result = DID_NO_CONNECT << 16;
2128                 return QLA_INTERFACE_ERROR;
2129         }
2130
2131         if (!qpair->difdix_supported &&
2132                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2133                 cmd->result = DID_NO_CONNECT << 16;
2134                 return QLA_INTERFACE_ERROR;
2135         }
2136
2137         /* Only process protection ops or CDBs longer than 16 bytes here */
2138         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2139                 if (cmd->cmd_len <= 16)
2140                         return qla2xxx_start_scsi_mq(sp);
2141         }
2142
2143         spin_lock_irqsave(&qpair->qp_lock, flags);
2144
2145         /* Setup qpair pointers */
2146         rsp = qpair->rsp;
2147         req = qpair->req;
2148
2149         /* So we know we haven't pci_map'ed anything yet */
2150         tot_dsds = 0;
2151
2152         /* Send marker if required */
2153         if (vha->marker_needed != 0) {
2154                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
2155                     QLA_SUCCESS) {
2156                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2157                         return QLA_FUNCTION_FAILED;
2158                 }
2159                 vha->marker_needed = 0;
2160         }
2161
2162         /* Check for room in outstanding command list. */
2163         handle = req->current_outstanding_cmd;
2164         for (index = 1; index < req->num_outstanding_cmds; index++) {
2165                 handle++;
2166                 if (handle == req->num_outstanding_cmds)
2167                         handle = 1;
2168                 if (!req->outstanding_cmds[handle])
2169                         break;
2170         }
2171
2172         if (index == req->num_outstanding_cmds)
2173                 goto queuing_error;
2174
2175         /* Compute number of required data segments */
2176         /* Map the sg table so we have an accurate count of sg entries needed */
2177         if (scsi_sg_count(cmd)) {
2178                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2179                     scsi_sg_count(cmd), cmd->sc_data_direction);
2180                 if (unlikely(!nseg))
2181                         goto queuing_error;
2182                 else
2183                         sp->flags |= SRB_DMA_VALID;
2184
2185                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2186                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2187                         struct qla2_sgx sgx;
2188                         uint32_t        partial;
2189
2190                         memset(&sgx, 0, sizeof(struct qla2_sgx));
2191                         sgx.tot_bytes = scsi_bufflen(cmd);
2192                         sgx.cur_sg = scsi_sglist(cmd);
2193                         sgx.sp = sp;
2194
2195                         nseg = 0;
2196                         while (qla24xx_get_one_block_sg(
2197                             cmd->device->sector_size, &sgx, &partial))
2198                                 nseg++;
2199                 }
2200         } else
2201                 nseg = 0;
2202
2203         /* number of required data segments */
2204         tot_dsds = nseg;
2205
2206         /* Compute number of required protection segments */
2207         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2208                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2209                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2210                 if (unlikely(!nseg))
2211                         goto queuing_error;
2212                 else
2213                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2214
2215                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2216                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2217                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2218                 }
2219         } else {
2220                 nseg = 0;
2221         }
2222
2223         req_cnt = 1;
2224         /* Total Data and protection sg segment(s) */
2225         tot_prot_dsds = nseg;
2226         tot_dsds += nseg;
2227         if (req->cnt < (req_cnt + 2)) {
2228                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2229                     RD_REG_DWORD_RELAXED(req->req_q_out);
2230                 if (req->ring_index < cnt)
2231                         req->cnt = cnt - req->ring_index;
2232                 else
2233                         req->cnt = req->length -
2234                                 (req->ring_index - cnt);
2235                 if (req->cnt < (req_cnt + 2))
2236                         goto queuing_error;
2237         }
2238
2239         status |= QDSS_GOT_Q_SPACE;
2240
2241         /* Build header part of command packet (excluding the OPCODE). */
2242         req->current_outstanding_cmd = handle;
2243         req->outstanding_cmds[handle] = sp;
2244         sp->handle = handle;
2245         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2246         req->cnt -= req_cnt;
2247
2248         /* Fill-in common area */
2249         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2250         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2251
2252         clr_ptr = (uint32_t *)cmd_pkt + 2;
2253         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2254
2255         /* Set NPORT-ID and LUN number*/
2256         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2257         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2258         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2259         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2260
2261         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2262         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2263
2264         /* Total Data and protection segment(s) */
2265         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2266
2267         /* Build IOCB segments and adjust for data protection segments */
2268         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2269             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2270                 QLA_SUCCESS)
2271                 goto queuing_error;
2272
2273         cmd_pkt->entry_count = (uint8_t)req_cnt;
2274         cmd_pkt->timeout = cpu_to_le16(0);
2275         wmb();
2276
2277         /* Adjust ring index. */
2278         req->ring_index++;
2279         if (req->ring_index == req->length) {
2280                 req->ring_index = 0;
2281                 req->ring_ptr = req->ring;
2282         } else
2283                 req->ring_ptr++;
2284
2285         /* Set chip new ring index. */
2286         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2287
2288         /* Manage unprocessed RIO/ZIO commands in response queue. */
2289         if (vha->flags.process_response_queue &&
2290             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2291                 qla24xx_process_response_queue(vha, rsp);
2292
2293         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2294
2295         return QLA_SUCCESS;
2296
2297 queuing_error:
2298         if (status & QDSS_GOT_Q_SPACE) {
2299                 req->outstanding_cmds[handle] = NULL;
2300                 req->cnt += req_cnt;
2301         }
2302         /* Cleanup will be performed by the caller (queuecommand) */
2303
2304         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2305         return QLA_FUNCTION_FAILED;
2306 }
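
/*
 * Relative to qla24xx_dif_start_scsi() above, this mq variant differs
 * mainly in locking and queue selection: it serializes on the per-pair
 * qp_lock instead of the global hardware_lock, pulls req/rsp from
 * sp->qpair, and drains unprocessed RIO/ZIO responses inline when
 * process_response_queue is set.
 */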
2307
2308 /* Generic Control-SRB manipulation functions. */
2309
2310 /* hardware_lock assumed to be held. */
2311
2312 void *
2313 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2314 {
2315         scsi_qla_host_t *vha = qpair->vha;
2316         struct qla_hw_data *ha = vha->hw;
2317         struct req_que *req = qpair->req;
2318         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2319         uint32_t index, handle;
2320         request_t *pkt;
2321         uint16_t cnt, req_cnt;
2322
2323         pkt = NULL;
2324         req_cnt = 1;
2325         handle = 0;
2326
2327         if (sp && (sp->type != SRB_SCSI_CMD)) {
2328                 /* Adjust entry-counts as needed. */
2329                 req_cnt = sp->iocbs;
2330         }
2331
2332         /* Check for room on request queue. */
2333         if (req->cnt < req_cnt + 2) {
2334                 if (qpair->use_shadow_reg)
2335                         cnt = *req->out_ptr;
2336                 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2337                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2338                 else if (IS_P3P_TYPE(ha))
2339                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2340                 else if (IS_FWI2_CAPABLE(ha))
2341                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2342                 else if (IS_QLAFX00(ha))
2343                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2344                 else
2345                         cnt = qla2x00_debounce_register(
2346                             ISP_REQ_Q_OUT(ha, &reg->isp));
2347
2348                 if  (req->ring_index < cnt)
2349                         req->cnt = cnt - req->ring_index;
2350                 else
2351                         req->cnt = req->length -
2352                             (req->ring_index - cnt);
2353         }
2354         if (req->cnt < req_cnt + 2)
2355                 goto queuing_error;
2356
2357         if (sp) {
2358                 /* Check for room in outstanding command list. */
2359                 handle = req->current_outstanding_cmd;
2360                 for (index = 1; index < req->num_outstanding_cmds; index++) {
2361                         handle++;
2362                         if (handle == req->num_outstanding_cmds)
2363                                 handle = 1;
2364                         if (!req->outstanding_cmds[handle])
2365                                 break;
2366                 }
2367                 if (index == req->num_outstanding_cmds) {
2368                         ql_log(ql_log_warn, vha, 0x700b,
2369                             "No room on outstanding cmd array.\n");
2370                         goto queuing_error;
2371                 }
2372
2373                 /* Prep command array. */
2374                 req->current_outstanding_cmd = handle;
2375                 req->outstanding_cmds[handle] = sp;
2376                 sp->handle = handle;
2377         }
2378
2379         /* Prep packet */
2380         req->cnt -= req_cnt;
2381         pkt = req->ring_ptr;
2382         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2383         if (IS_QLAFX00(ha)) {
2384                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2385                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2386         } else {
2387                 pkt->entry_count = req_cnt;
2388                 pkt->handle = handle;
2389         }
2390
2391         return pkt;
2392
2393 queuing_error:
2394         qpair->tgt_counters.num_alloc_iocb_failed++;
2395         return pkt;
2396 }
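
/*
 * Contract for __qla2x00_alloc_iocbs(): the caller holds the relevant
 * queue lock (hardware_lock or qp_lock, per the note above), and a NULL
 * return means no ring space or no free handle was available --
 * num_alloc_iocb_failed is bumped so the failure shows up in the tgt
 * counters.
 */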
2397
2398 void *
2399 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2400 {
2401         scsi_qla_host_t *vha = qpair->vha;
2402
2403         if (qla2x00_reset_active(vha))
2404                 return NULL;
2405
2406         return __qla2x00_alloc_iocbs(qpair, sp);
2407 }
2408
2409 void *
2410 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2411 {
2412         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2413 }
2414
2415 static void
2416 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2417 {
2418         struct srb_iocb *lio = &sp->u.iocb_cmd;
2419
2420         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2421         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2422         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2423                 logio->control_flags |= LCF_NVME_PRLI;
2424
2425         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2426         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2427         logio->port_id[1] = sp->fcport->d_id.b.area;
2428         logio->port_id[2] = sp->fcport->d_id.b.domain;
2429         logio->vp_index = sp->vha->vp_idx;
2430 }
2431
2432 static void
2433 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2434 {
2435         struct srb_iocb *lio = &sp->u.iocb_cmd;
2436
2437         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2438         if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2439                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2440         } else {
2441                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2442                 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2443                         logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2444                 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2445                         logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2446         }
2447         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2448         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2449         logio->port_id[1] = sp->fcport->d_id.b.area;
2450         logio->port_id[2] = sp->fcport->d_id.b.domain;
2451         logio->vp_index = sp->vha->vp_idx;
2452 }
2453
2454 static void
2455 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2456 {
2457         struct qla_hw_data *ha = sp->vha->hw;
2458         struct srb_iocb *lio = &sp->u.iocb_cmd;
2459         uint16_t opts;
2460
2461         mbx->entry_type = MBX_IOCB_TYPE;
2462         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2463         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2464         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2465         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2466         if (HAS_EXTENDED_IDS(ha)) {
2467                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2468                 mbx->mb10 = cpu_to_le16(opts);
2469         } else {
2470                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2471         }
2472         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2473         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2474             sp->fcport->d_id.b.al_pa);
2475         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2476 }
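
/*
 * Mailbox packing above: with extended IDs the loop ID gets its own mb1
 * and the option bits ride in mb10; legacy adapters squeeze both into
 * mb1 as (loop_id << 8) | opts.
 */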
2477
2478 static void
2479 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2480 {
2481         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2482         logio->control_flags =
2483             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2484         if (!sp->fcport->keep_nport_handle)
2485                 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2486         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2487         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2488         logio->port_id[1] = sp->fcport->d_id.b.area;
2489         logio->port_id[2] = sp->fcport->d_id.b.domain;
2490         logio->vp_index = sp->vha->vp_idx;
2491 }
2492
2493 static void
2494 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2495 {
2496         struct qla_hw_data *ha = sp->vha->hw;
2497
2498         mbx->entry_type = MBX_IOCB_TYPE;
2499         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2500         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2501         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2502             cpu_to_le16(sp->fcport->loop_id):
2503             cpu_to_le16(sp->fcport->loop_id << 8);
2504         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2505         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2506             sp->fcport->d_id.b.al_pa);
2507         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2508         /* Implicit: mbx->mb10 = 0. */
2509 }
2510
2511 static void
2512 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2513 {
2514         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2515         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2516         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2517         logio->vp_index = sp->vha->vp_idx;
2518 }
2519
2520 static void
2521 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2522 {
2523         struct qla_hw_data *ha = sp->vha->hw;
2524
2525         mbx->entry_type = MBX_IOCB_TYPE;
2526         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2527         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2528         if (HAS_EXTENDED_IDS(ha)) {
2529                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2530                 mbx->mb10 = cpu_to_le16(BIT_0);
2531         } else {
2532                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2533         }
2534         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2535         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2536         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2537         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2538         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2539 }
2540
2541 static void
2542 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2543 {
2544         uint32_t flags;
2545         uint64_t lun;
2546         struct fc_port *fcport = sp->fcport;
2547         scsi_qla_host_t *vha = fcport->vha;
2548         struct qla_hw_data *ha = vha->hw;
2549         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2550         struct req_que *req = vha->req;
2551
2552         flags = iocb->u.tmf.flags;
2553         lun = iocb->u.tmf.lun;
2554
2555         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2556         tsk->entry_count = 1;
2557         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2558         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2559         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2560         tsk->control_flags = cpu_to_le32(flags);
2561         tsk->port_id[0] = fcport->d_id.b.al_pa;
2562         tsk->port_id[1] = fcport->d_id.b.area;
2563         tsk->port_id[2] = fcport->d_id.b.domain;
2564         tsk->vp_index = fcport->vha->vp_idx;
2565
2566         if (flags == TCF_LUN_RESET) {
2567                 int_to_scsilun(lun, &tsk->lun);
2568                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2569                         sizeof(tsk->lun));
2570         }
2571 }
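
/*
 * Timeout note for the TMF IOCB above: assuming ha->r_a_tov is kept in
 * 100 ms units (as its use elsewhere in the driver suggests -- it is not
 * spelled out here), r_a_tov / 10 converts it to seconds and the * 2
 * gives the customary 2 * R_A_TOV wait for the exchange to be cleaned up.
 */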
2572
2573 static void
2574 qla2x00_els_dcmd_sp_free(void *data)
2575 {
2576         srb_t *sp = data;
2577         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2578
2579         kfree(sp->fcport);
2580
2581         if (elsio->u.els_logo.els_logo_pyld)
2582                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2583                     elsio->u.els_logo.els_logo_pyld,
2584                     elsio->u.els_logo.els_logo_pyld_dma);
2585
2586         del_timer(&elsio->timer);
2587         qla2x00_rel_sp(sp);
2588 }
2589
2590 static void
2591 qla2x00_els_dcmd_iocb_timeout(void *data)
2592 {
2593         srb_t *sp = data;
2594         fc_port_t *fcport = sp->fcport;
2595         struct scsi_qla_host *vha = sp->vha;
2596         struct srb_iocb *lio = &sp->u.iocb_cmd;
2597
2598         ql_dbg(ql_dbg_io, vha, 0x3069,
2599             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2600             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2601             fcport->d_id.b.al_pa);
2602
2603         complete(&lio->u.els_logo.comp);
2604 }
2605
2606 static void
2607 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2608 {
2609         srb_t *sp = ptr;
2610         fc_port_t *fcport = sp->fcport;
2611         struct srb_iocb *lio = &sp->u.iocb_cmd;
2612         struct scsi_qla_host *vha = sp->vha;
2613
2614         ql_dbg(ql_dbg_io, vha, 0x3072,
2615             "%s hdl=%x, portid=%02x%02x%02x done\n",
2616             sp->name, sp->handle, fcport->d_id.b.domain,
2617             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2618
2619         complete(&lio->u.els_logo.comp);
2620 }
2621
2622 int
2623 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2624     port_id_t remote_did)
2625 {
2626         srb_t *sp;
2627         fc_port_t *fcport = NULL;
2628         struct srb_iocb *elsio = NULL;
2629         struct qla_hw_data *ha = vha->hw;
2630         struct els_logo_payload logo_pyld;
2631         int rval = QLA_SUCCESS;
2632
2633         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2634         if (!fcport) {
2635                 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2636                 return -ENOMEM;
2637         }
2638
2639         /* Alloc SRB structure */
2640         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2641         if (!sp) {
2642                 kfree(fcport);
2643                 ql_log(ql_log_info, vha, 0x70e6,
2644                     "SRB allocation failed\n");
2645                 return -ENOMEM;
2646         }
2647
2648         elsio = &sp->u.iocb_cmd;
2649         fcport->loop_id = 0xFFFF;
2650         fcport->d_id.b.domain = remote_did.b.domain;
2651         fcport->d_id.b.area = remote_did.b.area;
2652         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2653
2654         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2655             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2656
2657         sp->type = SRB_ELS_DCMD;
2658         sp->name = "ELS_DCMD";
2659         sp->fcport = fcport;
2660         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2661         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2662         init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2663         sp->done = qla2x00_els_dcmd_sp_done;
2664         sp->free = qla2x00_els_dcmd_sp_free;
2665
2666         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2667                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2668                             GFP_KERNEL);
2669
2670         if (!elsio->u.els_logo.els_logo_pyld) {
2671                 sp->free(sp);
2672                 return QLA_FUNCTION_FAILED;
2673         }
2674
2675         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2676
2677         elsio->u.els_logo.els_cmd = els_opcode;
2678         logo_pyld.opcode = els_opcode;
2679         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2680         logo_pyld.s_id[1] = vha->d_id.b.area;
2681         logo_pyld.s_id[2] = vha->d_id.b.domain;
2682         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2683         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2684
2685         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2686             sizeof(struct els_logo_payload));
2687
2688         rval = qla2x00_start_sp(sp);
2689         if (rval != QLA_SUCCESS) {
2690                 sp->free(sp);
2691                 return QLA_FUNCTION_FAILED;
2692         }
2693
2694         ql_dbg(ql_dbg_io, vha, 0x3074,
2695             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2696             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2697             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2698
2699         wait_for_completion(&elsio->u.els_logo.comp);
2700
2701         sp->free(sp);
2702         return rval;
2703 }
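
/*
 * Note that qla24xx_els_dcmd_iocb() is synchronous: it allocates with
 * GFP_KERNEL and blocks in wait_for_completion() until the LOGO
 * completes or times out, so it must only be called from process
 * context.
 */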
2704
2705 static void
2706 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2707 {
2708         scsi_qla_host_t *vha = sp->vha;
2709         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2710
2711         els_iocb->entry_type = ELS_IOCB_TYPE;
2712         els_iocb->entry_count = 1;
2713         els_iocb->sys_define = 0;
2714         els_iocb->entry_status = 0;
2715         els_iocb->handle = sp->handle;
2716         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2717         els_iocb->tx_dsd_count = 1;
2718         els_iocb->vp_index = vha->vp_idx;
2719         els_iocb->sof_type = EST_SOFI3;
2720         els_iocb->rx_dsd_count = 0;
2721         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2722
2723         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2724         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2725         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2726         els_iocb->s_id[0] = vha->d_id.b.al_pa;
2727         els_iocb->s_id[1] = vha->d_id.b.area;
2728         els_iocb->s_id[2] = vha->d_id.b.domain;
2729         els_iocb->control_flags = 0;
2730
2731         if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2732                 els_iocb->tx_byte_count = els_iocb->tx_len =
2733                         sizeof(struct els_plogi_payload);
2734                 els_iocb->tx_address[0] =
2735                         cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2736                 els_iocb->tx_address[1] =
2737                         cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2738
2739                 els_iocb->rx_dsd_count = 1;
2740                 els_iocb->rx_byte_count = els_iocb->rx_len =
2741                         sizeof(struct els_plogi_payload);
2742                 els_iocb->rx_address[0] =
2743                         cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2744                 els_iocb->rx_address[1] =
2745                         cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2746
2747                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2748                     "PLOGI ELS IOCB:\n");
2749                 ql_dump_buffer(ql_log_info, vha, 0x0109,
2750                     (uint8_t *)els_iocb, 0x70);
2751         } else {
2752                 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2753                 els_iocb->tx_address[0] =
2754                     cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2755                 els_iocb->tx_address[1] =
2756                     cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2757                 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2758
2759                 els_iocb->rx_byte_count = 0;
2760                 els_iocb->rx_address[0] = 0;
2761                 els_iocb->rx_address[1] = 0;
2762                 els_iocb->rx_len = 0;
2763         }
2764
2765         sp->vha->qla_stats.control_requests++;
2766 }
2767
2768 static void
2769 qla2x00_els_dcmd2_iocb_timeout(void *data)
2770 {
2771         srb_t *sp = data;
2772         fc_port_t *fcport = sp->fcport;
2773         struct scsi_qla_host *vha = sp->vha;
2774         struct qla_hw_data *ha = vha->hw;
2775         unsigned long flags = 0;
2776         int res;
2777
2778         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2779             "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2780             sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2781
2782         /* Abort the exchange */
2783         spin_lock_irqsave(&ha->hardware_lock, flags);
2784         res = ha->isp_ops->abort_command(sp);
2785         ql_dbg(ql_dbg_io, vha, 0x3070,
2786             "mbx abort_command %s\n",
2787             (res == QLA_SUCCESS) ? "successful" : "failed");
2788         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2789
2790         sp->done(sp, QLA_FUNCTION_TIMEOUT);
2791 }
2792
2793 static void
2794 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2795 {
2796         srb_t *sp = ptr;
2797         fc_port_t *fcport = sp->fcport;
2798         struct srb_iocb *lio = &sp->u.iocb_cmd;
2799         struct scsi_qla_host *vha = sp->vha;
2800         struct event_arg ea;
2801         struct qla_work_evt *e;
2802
2803         ql_dbg(ql_dbg_disc, vha, 0x3072,
2804             "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2805             sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2806
2807         fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2808         del_timer(&sp->u.iocb_cmd.timer);
2809
2810         if (sp->flags & SRB_WAKEUP_ON_COMP)
2811                 complete(&lio->u.els_plogi.comp);
2812         else {
2813                 if (res) {
2814                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2815                 } else {
2816                         memset(&ea, 0, sizeof(ea));
2817                         ea.fcport = fcport;
2818                         ea.rc = res;
2819                         ea.event = FCME_ELS_PLOGI_DONE;
2820                         qla2x00_fcport_event_handler(vha, &ea);
2821                 }
2822
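                /*
                 * Hand the SRB to a work item so the DMA buffers are
                 * unmapped in process context; if the work item cannot
                 * be allocated, free them inline below instead.
                 */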
2823                 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2824                 if (!e) {
2825                         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2826
2827                         if (elsio->u.els_plogi.els_plogi_pyld)
2828                                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2829                                     elsio->u.els_plogi.tx_size,
2830                                     elsio->u.els_plogi.els_plogi_pyld,
2831                                     elsio->u.els_plogi.els_plogi_pyld_dma);
2832
2833                         if (elsio->u.els_plogi.els_resp_pyld)
2834                                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2835                                     elsio->u.els_plogi.rx_size,
2836                                     elsio->u.els_plogi.els_resp_pyld,
2837                                     elsio->u.els_plogi.els_resp_pyld_dma);
2838                         sp->free(sp);
2839                         return;
2840                 }
2841                 e->u.iosb.sp = sp;
2842                 qla2x00_post_work(vha, e);
2843         }
2844 }
2845
2846 int
2847 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2848     fc_port_t *fcport, bool wait)
2849 {
2850         srb_t *sp;
2851         struct srb_iocb *elsio = NULL;
2852         struct qla_hw_data *ha = vha->hw;
2853         int rval = QLA_SUCCESS;
2854         void    *ptr, *resp_ptr;
2855
2856         /* Alloc SRB structure */
2857         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2858         if (!sp) {
                ql_log(ql_log_info, vha, 0x70e6,
                    "SRB allocation failed\n");
2861                 return -ENOMEM;
2862         }
2863
2864         elsio = &sp->u.iocb_cmd;
2865         ql_dbg(ql_dbg_io, vha, 0x3073,
2866             "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2867
2868         fcport->flags |= FCF_ASYNC_SENT;
2869         sp->type = SRB_ELS_DCMD;
2870         sp->name = "ELS_DCMD";
2871         sp->fcport = fcport;
2872
2873         elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2874         init_completion(&elsio->u.els_plogi.comp);
2875         if (wait)
2876                 sp->flags = SRB_WAKEUP_ON_COMP;
2877
2878         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2879
2880         sp->done = qla2x00_els_dcmd2_sp_done;
2881         elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2882
2883         ptr = elsio->u.els_plogi.els_plogi_pyld =
2884             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2885                 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2886
2887         if (!elsio->u.els_plogi.els_plogi_pyld) {
2888                 rval = QLA_FUNCTION_FAILED;
2889                 goto out;
2890         }
2891
2892         resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2893             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2894                 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2895
2896         if (!elsio->u.els_plogi.els_resp_pyld) {
2897                 rval = QLA_FUNCTION_FAILED;
2898                 goto out;
2899         }
2900
2901         ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2902
2903         memset(ptr, 0, sizeof(struct els_plogi_payload));
2904         memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2905         memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2906             &ha->plogi_els_payld.data,
2907             sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2908
2909         elsio->u.els_plogi.els_cmd = els_opcode;
2910         elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2911
2912         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2913         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2914             (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2915
2916         rval = qla2x00_start_sp(sp);
2917         if (rval != QLA_SUCCESS) {
2918                 rval = QLA_FUNCTION_FAILED;
2919         } else {
2920                 ql_dbg(ql_dbg_disc, vha, 0x3074,
2921                     "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2922                     sp->name, sp->handle, fcport->loop_id,
2923                     fcport->d_id.b24, vha->d_id.b24);
2924         }
2925
2926         if (wait) {
2927                 wait_for_completion(&elsio->u.els_plogi.comp);
2928
2929                 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2930                         rval = QLA_FUNCTION_FAILED;
2931         } else {
2932                 goto done;
2933         }
2934
2935 out:
2936         fcport->flags &= ~(FCF_ASYNC_SENT);
2937         if (elsio->u.els_plogi.els_plogi_pyld)
2938                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2939                     elsio->u.els_plogi.tx_size,
2940                     elsio->u.els_plogi.els_plogi_pyld,
2941                     elsio->u.els_plogi.els_plogi_pyld_dma);
2942
2943         if (elsio->u.els_plogi.els_resp_pyld)
2944                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2945                     elsio->u.els_plogi.rx_size,
2946                     elsio->u.els_plogi.els_resp_pyld,
2947                     elsio->u.els_plogi.els_resp_pyld_dma);
2948
2949         sp->free(sp);
2950 done:
2951         return rval;
2952 }
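/*
 * Illustrative use of qla24xx_els_dcmd2_iocb() (a sketch, not part of
 * the driver): issue an explicit PLOGI to a remote port and request a
 * relogin if it fails.  The fcport is assumed to come from the
 * driver's session database.
 *
 *	if (qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, fcport, true) !=
 *	    QLA_SUCCESS)
 *		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 */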
2953
2954 static void
2955 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2956 {
2957         struct bsg_job *bsg_job = sp->u.bsg_job;
2958         struct fc_bsg_request *bsg_request = bsg_job->request;
2959
2960         els_iocb->entry_type = ELS_IOCB_TYPE;
2961         els_iocb->entry_count = 1;
2962         els_iocb->sys_define = 0;
2963         els_iocb->entry_status = 0;
2964         els_iocb->handle = sp->handle;
2965         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2966         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2967         els_iocb->vp_index = sp->vha->vp_idx;
2968         els_iocb->sof_type = EST_SOFI3;
2969         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2970
2971         els_iocb->opcode =
2972             sp->type == SRB_ELS_CMD_RPT ?
2973             bsg_request->rqst_data.r_els.els_code :
2974             bsg_request->rqst_data.h_els.command_code;
2975         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2976         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2977         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2978         els_iocb->control_flags = 0;
2979         els_iocb->rx_byte_count =
2980             cpu_to_le32(bsg_job->reply_payload.payload_len);
2981         els_iocb->tx_byte_count =
2982             cpu_to_le32(bsg_job->request_payload.payload_len);
2983
2984         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2985             (bsg_job->request_payload.sg_list)));
2986         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2987             (bsg_job->request_payload.sg_list)));
2988         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2989             (bsg_job->request_payload.sg_list));
2990
2991         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2992             (bsg_job->reply_payload.sg_list)));
2993         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2994             (bsg_job->reply_payload.sg_list)));
2995         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2996             (bsg_job->reply_payload.sg_list));
2997
2998         sp->vha->qla_stats.control_requests++;
2999 }
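/*
 * Illustrative only: LSD()/MSD() split a 64-bit dma_addr_t into the
 * low and high 32-bit words the two-word DSD address format expects:
 *
 *	dma_addr_t dma = 0x0000000123456789ULL;
 *	uint32_t lo = LSD(dma);		(0x23456789)
 *	uint32_t hi = MSD(dma);		(0x00000001)
 */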
3000
3001 static void
3002 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3003 {
3004         uint16_t        avail_dsds;
3005         uint32_t        *cur_dsd;
3006         struct scatterlist *sg;
3007         int index;
3008         uint16_t tot_dsds;
3009         scsi_qla_host_t *vha = sp->vha;
3010         struct qla_hw_data *ha = vha->hw;
3011         struct bsg_job *bsg_job = sp->u.bsg_job;
        int loop_iteration = 0;
3013         int entry_count = 1;
3014
3015         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3016         ct_iocb->entry_type = CT_IOCB_TYPE;
3017         ct_iocb->entry_status = 0;
3018         ct_iocb->handle1 = sp->handle;
3019         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3020         ct_iocb->status = cpu_to_le16(0);
3021         ct_iocb->control_flags = cpu_to_le16(0);
3022         ct_iocb->timeout = 0;
3023         ct_iocb->cmd_dsd_count =
3024             cpu_to_le16(bsg_job->request_payload.sg_cnt);
3025         ct_iocb->total_dsd_count =
3026             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3027         ct_iocb->req_bytecount =
3028             cpu_to_le32(bsg_job->request_payload.payload_len);
3029         ct_iocb->rsp_bytecount =
3030             cpu_to_le32(bsg_job->reply_payload.payload_len);
3031
3032         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
3033             (bsg_job->request_payload.sg_list)));
3034         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
3035             (bsg_job->request_payload.sg_list)));
3036         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
3037
3038         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
3039             (bsg_job->reply_payload.sg_list)));
3040         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
3041             (bsg_job->reply_payload.sg_list)));
3042         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
3043
3044         avail_dsds = 1;
3045         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
3046         index = 0;
3047         tot_dsds = bsg_job->reply_payload.sg_cnt;
3048
3049         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3050                 dma_addr_t       sle_dma;
3051                 cont_a64_entry_t *cont_pkt;
3052
3053                 /* Allocate additional continuation packets? */
3054                 if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
3059                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3060                             vha->hw->req_q_map[0]);
3061                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3062                         avail_dsds = 5;
3063                         entry_count++;
3064                 }
3065
3066                 sle_dma = sg_dma_address(sg);
3067                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3068                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3069                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
3071                 avail_dsds--;
3072         }
3073         ct_iocb->entry_count = entry_count;
3074
3075         sp->vha->qla_stats.control_requests++;
3076 }
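/*
 * Worked example for the entry count above: with one DSD inline in
 * the CT IOCB and five per Continuation Type 1 IOCB, an 11-entry
 * reply scatterlist needs 1 + DIV_ROUND_UP(11 - 1, 5) = 3 ring
 * entries.
 */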
3077
3078 static void
3079 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3080 {
3081         uint16_t        avail_dsds;
3082         uint32_t        *cur_dsd;
3083         struct scatterlist *sg;
3084         int index;
3085         uint16_t cmd_dsds, rsp_dsds;
3086         scsi_qla_host_t *vha = sp->vha;
3087         struct qla_hw_data *ha = vha->hw;
3088         struct bsg_job *bsg_job = sp->u.bsg_job;
3089         int entry_count = 1;
3090         cont_a64_entry_t *cont_pkt = NULL;
3091
3092         ct_iocb->entry_type = CT_IOCB_TYPE;
3093         ct_iocb->entry_status = 0;
3094         ct_iocb->sys_define = 0;
3095         ct_iocb->handle = sp->handle;
3096
3097         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3098         ct_iocb->vp_index = sp->vha->vp_idx;
3099         ct_iocb->comp_status = cpu_to_le16(0);
3100
3101         cmd_dsds = bsg_job->request_payload.sg_cnt;
3102         rsp_dsds = bsg_job->reply_payload.sg_cnt;
3103
3104         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3105         ct_iocb->timeout = 0;
3106         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3107         ct_iocb->cmd_byte_count =
3108             cpu_to_le32(bsg_job->request_payload.payload_len);
3109
3110         avail_dsds = 2;
3111         cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
3112         index = 0;
3113
3114         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3115                 dma_addr_t       sle_dma;
3116
3117                 /* Allocate additional continuation packets? */
3118                 if (avail_dsds == 0) {
3119                         /*
3120                          * Five DSDs are available in the Cont.
3121                          * Type 1 IOCB.
3122                          */
3123                         cont_pkt = qla2x00_prep_cont_type1_iocb(
3124                             vha, ha->req_q_map[0]);
3125                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3126                         avail_dsds = 5;
3127                         entry_count++;
3128                 }
3129
3130                 sle_dma = sg_dma_address(sg);
3131                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3132                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3133                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3134                 avail_dsds--;
3135         }
3136
3137         index = 0;
3138
3139         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3140                 dma_addr_t       sle_dma;
3141
3142                 /* Allocate additional continuation packets? */
3143                 if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
3148                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3149                             ha->req_q_map[0]);
3150                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3151                         avail_dsds = 5;
3152                         entry_count++;
3153                 }
3154
3155                 sle_dma = sg_dma_address(sg);
3156                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3157                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3158                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3159                 avail_dsds--;
3160         }
3161         ct_iocb->entry_count = entry_count;
3162 }
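/*
 * A sketch of the entry-count arithmetic above: struct ct_entry_24xx
 * holds two inline DSDs shared by the command and response lists, and
 * each Continuation Type 1 IOCB adds five more.  For example,
 * cmd_dsds = 1 and rsp_dsds = 6 consume the two inline slots plus one
 * continuation packet, giving entry_count = 2.
 */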
3163
/**
3165  * qla82xx_start_scsi() - Send a SCSI command to the ISP
3166  * @sp: command to send to the ISP
3167  *
3168  * Returns non-zero if a failure occurred, else zero.
3169  */
3170 int
3171 qla82xx_start_scsi(srb_t *sp)
3172 {
3173         int             nseg;
3174         unsigned long   flags;
3175         struct scsi_cmnd *cmd;
3176         uint32_t        *clr_ptr;
3177         uint32_t        index;
3178         uint32_t        handle;
3179         uint16_t        cnt;
3180         uint16_t        req_cnt;
3181         uint16_t        tot_dsds;
3182         struct device_reg_82xx __iomem *reg;
3183         uint32_t dbval;
3184         uint32_t *fcp_dl;
3185         uint8_t additional_cdb_len;
3186         struct ct6_dsd *ctx;
3187         struct scsi_qla_host *vha = sp->vha;
3188         struct qla_hw_data *ha = vha->hw;
3189         struct req_que *req = NULL;
3190         struct rsp_que *rsp = NULL;
3191
3192         /* Setup device pointers. */
3193         reg = &ha->iobase->isp82;
3194         cmd = GET_CMD_SP(sp);
3195         req = vha->req;
3196         rsp = ha->rsp_q_map[0];
3197
3198         /* So we know we haven't pci_map'ed anything yet */
3199         tot_dsds = 0;
3200
3201         dbval = 0x04 | (ha->portnum << 5);
3202
3203         /* Send marker if required */
3204         if (vha->marker_needed != 0) {
3205                 if (qla2x00_marker(vha, req,
3206                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3207                         ql_log(ql_log_warn, vha, 0x300c,
3208                             "qla2x00_marker failed for cmd=%p.\n", cmd);
3209                         return QLA_FUNCTION_FAILED;
3210                 }
3211                 vha->marker_needed = 0;
3212         }
3213
3214         /* Acquire ring specific lock */
3215         spin_lock_irqsave(&ha->hardware_lock, flags);
3216
3217         /* Check for room in outstanding command list. */
3218         handle = req->current_outstanding_cmd;
3219         for (index = 1; index < req->num_outstanding_cmds; index++) {
3220                 handle++;
3221                 if (handle == req->num_outstanding_cmds)
3222                         handle = 1;
3223                 if (!req->outstanding_cmds[handle])
3224                         break;
3225         }
3226         if (index == req->num_outstanding_cmds)
3227                 goto queuing_error;
3228
3229         /* Map the sg table so we have an accurate count of sg entries needed */
3230         if (scsi_sg_count(cmd)) {
3231                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3232                     scsi_sg_count(cmd), cmd->sc_data_direction);
3233                 if (unlikely(!nseg))
3234                         goto queuing_error;
        } else {
                nseg = 0;
        }
3237
3238         tot_dsds = nseg;
3239
3240         if (tot_dsds > ql2xshiftctondsd) {
3241                 struct cmd_type_6 *cmd_pkt;
3242                 uint16_t more_dsd_lists = 0;
3243                 struct dsd_dma *dsd_ptr;
3244                 uint16_t i;
3245
3246                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3247                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
                        ql_dbg(ql_dbg_io, vha, 0x300d,
                            "Num of DSD list %d is more than %d for cmd=%p.\n",
                            more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
                            cmd);
3252                         goto queuing_error;
3253                 }
3254
3255                 if (more_dsd_lists <= ha->gbl_dsd_avail)
3256                         goto sufficient_dsds;
3257                 else
3258                         more_dsd_lists -= ha->gbl_dsd_avail;
3259
3260                 for (i = 0; i < more_dsd_lists; i++) {
3261                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3262                         if (!dsd_ptr) {
3263                                 ql_log(ql_log_fatal, vha, 0x300e,
3264                                     "Failed to allocate memory for dsd_dma "
3265                                     "for cmd=%p.\n", cmd);
3266                                 goto queuing_error;
3267                         }
3268
3269                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3270                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3271                         if (!dsd_ptr->dsd_addr) {
3272                                 kfree(dsd_ptr);
3273                                 ql_log(ql_log_fatal, vha, 0x300f,
3274                                     "Failed to allocate memory for dsd_addr "
3275                                     "for cmd=%p.\n", cmd);
3276                                 goto queuing_error;
3277                         }
3278                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3279                         ha->gbl_dsd_avail++;
3280                 }
3281
3282 sufficient_dsds:
3283                 req_cnt = 1;
3284
3285                 if (req->cnt < (req_cnt + 2)) {
3286                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3287                                 &reg->req_q_out[0]);
3288                         if (req->ring_index < cnt)
3289                                 req->cnt = cnt - req->ring_index;
3290                         else
3291                                 req->cnt = req->length -
3292                                         (req->ring_index - cnt);
3293                         if (req->cnt < (req_cnt + 2))
3294                                 goto queuing_error;
3295                 }
3296
3297                 ctx = sp->u.scmd.ctx =
3298                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3299                 if (!ctx) {
3300                         ql_log(ql_log_fatal, vha, 0x3010,
3301                             "Failed to allocate ctx for cmd=%p.\n", cmd);
3302                         goto queuing_error;
3303                 }
3304
3305                 memset(ctx, 0, sizeof(struct ct6_dsd));
3306                 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3307                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3308                 if (!ctx->fcp_cmnd) {
3309                         ql_log(ql_log_fatal, vha, 0x3011,
3310                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3311                         goto queuing_error;
3312                 }
3313
3314                 /* Initialize the DSD list and dma handle */
3315                 INIT_LIST_HEAD(&ctx->dsd_list);
3316                 ctx->dsd_use_cnt = 0;
3317
3318                 if (cmd->cmd_len > 16) {
3319                         additional_cdb_len = cmd->cmd_len - 16;
3320                         if ((cmd->cmd_len % 4) != 0) {
3321                                 /* SCSI command bigger than 16 bytes must be
3322                                  * multiple of 4
3323                                  */
3324                                 ql_log(ql_log_warn, vha, 0x3012,
3325                                     "scsi cmd len %d not multiple of 4 "
3326                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3327                                 goto queuing_error_fcp_cmnd;
3328                         }
3329                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3330                 } else {
3331                         additional_cdb_len = 0;
3332                         ctx->fcp_cmnd_len = 12 + 16 + 4;
3333                 }
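                /*
                 * Worked example: a 32-byte CDB gives
                 * additional_cdb_len = 16 and fcp_cmnd_len = 12 + 32 + 4
                 * = 48, while a 16-byte (or shorter) CDB always yields
                 * 12 + 16 + 4 = 32.
                 */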
3334
3335                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3336                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3337
3338                 /* Zero out remaining portion of packet. */
3339                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3340                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3341                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3342                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3343
3344                 /* Set NPORT-ID and LUN number*/
3345                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3346                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3347                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3348                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3349                 cmd_pkt->vp_index = sp->vha->vp_idx;
3350
3351                 /* Build IOCB segments */
3352                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3353                         goto queuing_error_fcp_cmnd;
3354
3355                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3356                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3357
3358                 /* build FCP_CMND IU */
3359                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3360                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3361
3362                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3363                         ctx->fcp_cmnd->additional_cdb_len |= 1;
3364                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3365                         ctx->fcp_cmnd->additional_cdb_len |= 2;
3366
3367                 /* Populate the FCP_PRIO. */
3368                 if (ha->flags.fcp_prio_enabled)
3369                         ctx->fcp_cmnd->task_attribute |=
3370                             sp->fcport->fcp_prio << 3;
3371
3372                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3373
3374                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3375                     additional_cdb_len);
3376                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3377
3378                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3379                 cmd_pkt->fcp_cmnd_dseg_address[0] =
3380                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3381                 cmd_pkt->fcp_cmnd_dseg_address[1] =
3382                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3383
3384                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3385                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3386                 /* Set total data segment count. */
3387                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3388                 /* Specify response queue number where
3389                  * completion should happen
3390                  */
3391                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3392         } else {
3393                 struct cmd_type_7 *cmd_pkt;
3394                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3395                 if (req->cnt < (req_cnt + 2)) {
3396                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3397                             &reg->req_q_out[0]);
3398                         if (req->ring_index < cnt)
3399                                 req->cnt = cnt - req->ring_index;
3400                         else
3401                                 req->cnt = req->length -
3402                                         (req->ring_index - cnt);
3403                 }
3404                 if (req->cnt < (req_cnt + 2))
3405                         goto queuing_error;
3406
3407                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3408                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3409
3410                 /* Zero out remaining portion of packet. */
3411                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3412                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3413                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3414                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3415
3416                 /* Set NPORT-ID and LUN number*/
3417                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3418                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3419                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3420                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3421                 cmd_pkt->vp_index = sp->vha->vp_idx;
3422
3423                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3424                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3425                     sizeof(cmd_pkt->lun));
3426
3427                 /* Populate the FCP_PRIO. */
3428                 if (ha->flags.fcp_prio_enabled)
3429                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3430
3431                 /* Load SCSI command packet. */
3432                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3433                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3434
3435                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3436
3437                 /* Build IOCB segments */
3438                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3439
3440                 /* Set total data segment count. */
3441                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3442                 /* Specify response queue number where
3443                  * completion should happen.
3444                  */
3445                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3446
3447         }
3448         /* Build command packet. */
3449         req->current_outstanding_cmd = handle;
3450         req->outstanding_cmds[handle] = sp;
3451         sp->handle = handle;
3452         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3453         req->cnt -= req_cnt;
3454         wmb();
3455
3456         /* Adjust ring index. */
3457         req->ring_index++;
3458         if (req->ring_index == req->length) {
3459                 req->ring_index = 0;
3460                 req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }
3463
3464         sp->flags |= SRB_DMA_VALID;
3465
3466         /* Set chip new ring index. */
3467         /* write, read and verify logic */
3468         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
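        /*
         * Doorbell word layout, as composed above (illustrative):
         * bits 0-4 hold the request selector (0x04), bits 5-7 the port
         * number, bits 8-15 the request queue id and bits 16-31 the new
         * ring index.
         */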
        if (ql2xdbwr) {
                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
        } else {
3472                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3473                 wmb();
3474                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3475                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3476                         wmb();
3477                 }
3478         }
3479
3480         /* Manage unprocessed RIO/ZIO commands in response queue. */
3481         if (vha->flags.process_response_queue &&
3482             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3483                 qla24xx_process_response_queue(vha, rsp);
3484
3485         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3486         return QLA_SUCCESS;
3487
3488 queuing_error_fcp_cmnd:
3489         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3490 queuing_error:
3491         if (tot_dsds)
3492                 scsi_dma_unmap(cmd);
3493
3494         if (sp->u.scmd.ctx) {
3495                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3496                 sp->u.scmd.ctx = NULL;
3497         }
3498         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3499
3500         return QLA_FUNCTION_FAILED;
3501 }
3502
3503 static void
3504 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3505 {
3506         struct srb_iocb *aio = &sp->u.iocb_cmd;
3507         scsi_qla_host_t *vha = sp->vha;
3508         struct req_que *req = sp->qpair->req;
3509
3510         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3511         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3512         abt_iocb->entry_count = 1;
3513         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3514         if (sp->fcport) {
3515                 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3516                 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3517                 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3518                 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3519         }
3520         abt_iocb->handle_to_abort =
3521             cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3522                                     aio->u.abt.cmd_hndl));
3523         abt_iocb->vp_index = vha->vp_idx;
3524         abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3525         /* Send the command to the firmware */
3526         wmb();
3527 }
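/*
 * A sketch of the handle packing used above: MAKE_HANDLE() places the
 * request queue number in the upper 16 bits of the 32-bit handle, so
 * queue 1 and command handle 0x2a yield 0x0001002a.  This lets the
 * firmware locate both the queue and the exchange being aborted.
 */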
3528
3529 static void
3530 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3531 {
3532         int i, sz;
3533
3534         mbx->entry_type = MBX_IOCB_TYPE;
3535         mbx->handle = sp->handle;
3536         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3537
3538         for (i = 0; i < sz; i++)
3539                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3540 }
3541
3542 static void
3543 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3544 {
3545         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3546         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3547         ct_pkt->handle = sp->handle;
3548 }
3549
3550 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3551         struct nack_to_isp *nack)
3552 {
3553         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3554
3555         nack->entry_type = NOTIFY_ACK_TYPE;
3556         nack->entry_count = 1;
3557         nack->ox_id = ntfy->ox_id;
3558
3559         nack->u.isp24.handle = sp->handle;
3560         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3561         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3562                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3563                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3564         }
3565         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3566         nack->u.isp24.status = ntfy->u.isp24.status;
3567         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3568         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3569         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3570         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3571         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3572         nack->u.isp24.srr_flags = 0;
3573         nack->u.isp24.srr_reject_code = 0;
3574         nack->u.isp24.srr_reject_code_expl = 0;
3575         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3576 }
3577
3578 /*
3579  * Build NVME LS request
3580  */
3581 static int
3582 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3583 {
3584         struct srb_iocb *nvme;
3585         int     rval = QLA_SUCCESS;
3586
3587         nvme = &sp->u.iocb_cmd;
3588         cmd_pkt->entry_type = PT_LS4_REQUEST;
3589         cmd_pkt->entry_count = 1;
3590         cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3591
3592         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3593         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3594         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3595
3596         cmd_pkt->tx_dseg_count = 1;
3597         cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3598         cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3599         cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3600         cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3601
3602         cmd_pkt->rx_dseg_count = 1;
3603         cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3604         cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
3605         cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3606         cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3607
3608         return rval;
3609 }
3610
3611 static void
3612 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3613 {
3614         int map, pos;
3615
3616         vce->entry_type = VP_CTRL_IOCB_TYPE;
3617         vce->handle = sp->handle;
3618         vce->entry_count = 1;
3619         vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3620         vce->vp_count = cpu_to_le16(1);
3621
3622         /*
3623          * index map in firmware starts with 1; decrement index
3624          * this is ok as we never use index 0
3625          */
3626         map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3627         pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3628         vce->vp_idx_map[map] |= 1 << pos;
3629 }
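/*
 * Example of the index math above: vp_index 10 maps to bit 1 of
 * vp_idx_map[1], since (10 - 1) / 8 = 1 and (10 - 1) & 7 = 1.
 */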
3630
3631 static void
3632 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3633 {
3634         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3635         logio->control_flags =
3636             cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3637
3638         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3639         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3640         logio->port_id[1] = sp->fcport->d_id.b.area;
3641         logio->port_id[2] = sp->fcport->d_id.b.domain;
3642         logio->vp_index = sp->fcport->vha->vp_idx;
3643 }
3644
3645 int
3646 qla2x00_start_sp(srb_t *sp)
3647 {
3648         int rval;
3649         scsi_qla_host_t *vha = sp->vha;
3650         struct qla_hw_data *ha = vha->hw;
3651         struct qla_qpair *qp = sp->qpair;
3652         void *pkt;
3653         unsigned long flags;
3654
3655         rval = QLA_FUNCTION_FAILED;
3656         spin_lock_irqsave(qp->qp_lock_ptr, flags);
3657         pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3658         if (!pkt) {
3659                 ql_log(ql_log_warn, vha, 0x700c,
3660                     "qla2x00_alloc_iocbs failed.\n");
3661                 goto done;
3662         }
3663
3664         rval = QLA_SUCCESS;
3665         switch (sp->type) {
3666         case SRB_LOGIN_CMD:
3667                 IS_FWI2_CAPABLE(ha) ?
3668                     qla24xx_login_iocb(sp, pkt) :
3669                     qla2x00_login_iocb(sp, pkt);
3670                 break;
3671         case SRB_PRLI_CMD:
3672                 qla24xx_prli_iocb(sp, pkt);
3673                 break;
3674         case SRB_LOGOUT_CMD:
3675                 IS_FWI2_CAPABLE(ha) ?
3676                     qla24xx_logout_iocb(sp, pkt) :
3677                     qla2x00_logout_iocb(sp, pkt);
3678                 break;
3679         case SRB_ELS_CMD_RPT:
3680         case SRB_ELS_CMD_HST:
3681                 qla24xx_els_iocb(sp, pkt);
3682                 break;
3683         case SRB_CT_CMD:
3684                 IS_FWI2_CAPABLE(ha) ?
3685                     qla24xx_ct_iocb(sp, pkt) :
3686                     qla2x00_ct_iocb(sp, pkt);
3687                 break;
3688         case SRB_ADISC_CMD:
3689                 IS_FWI2_CAPABLE(ha) ?
3690                     qla24xx_adisc_iocb(sp, pkt) :
3691                     qla2x00_adisc_iocb(sp, pkt);
3692                 break;
3693         case SRB_TM_CMD:
3694                 IS_QLAFX00(ha) ?
3695                     qlafx00_tm_iocb(sp, pkt) :
3696                     qla24xx_tm_iocb(sp, pkt);
3697                 break;
3698         case SRB_FXIOCB_DCMD:
3699         case SRB_FXIOCB_BCMD:
3700                 qlafx00_fxdisc_iocb(sp, pkt);
3701                 break;
3702         case SRB_NVME_LS:
3703                 qla_nvme_ls(sp, pkt);
3704                 break;
3705         case SRB_ABT_CMD:
3706                 IS_QLAFX00(ha) ?
3707                         qlafx00_abort_iocb(sp, pkt) :
3708                         qla24xx_abort_iocb(sp, pkt);
3709                 break;
3710         case SRB_ELS_DCMD:
3711                 qla24xx_els_logo_iocb(sp, pkt);
3712                 break;
3713         case SRB_CT_PTHRU_CMD:
3714                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3715                 break;
3716         case SRB_MB_IOCB:
3717                 qla2x00_mb_iocb(sp, pkt);
3718                 break;
3719         case SRB_NACK_PLOGI:
3720         case SRB_NACK_PRLI:
3721         case SRB_NACK_LOGO:
3722                 qla2x00_send_notify_ack_iocb(sp, pkt);
3723                 break;
3724         case SRB_CTRL_VP:
3725                 qla25xx_ctrlvp_iocb(sp, pkt);
3726                 break;
3727         case SRB_PRLO_CMD:
3728                 qla24xx_prlo_iocb(sp, pkt);
3729                 break;
3730         default:
3731                 break;
3732         }
3733
3734         wmb();
3735         qla2x00_start_iocbs(vha, qp->req);
3736 done:
3737         spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3738         return rval;
3739 }
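/*
 * Typical use of qla2x00_start_sp() (a sketch, mirroring callers such
 * as qla24xx_els_dcmd2_iocb() above): allocate an SRB, fill in its
 * type and callbacks, then queue it.
 *
 *	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *	sp->type = SRB_ELS_DCMD;
 *	sp->done = qla2x00_els_dcmd2_sp_done;
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		sp->free(sp);
 */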
3740
3741 static void
3742 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3743                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3744 {
3745         uint16_t avail_dsds;
3746         uint32_t *cur_dsd;
3747         uint32_t req_data_len = 0;
3748         uint32_t rsp_data_len = 0;
3749         struct scatterlist *sg;
3750         int index;
3751         int entry_count = 1;
3752         struct bsg_job *bsg_job = sp->u.bsg_job;
3753
        /* Update entry type to indicate bidir command. */
3755         *((uint32_t *)(&cmd_pkt->entry_type)) =
3756                 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3757
        /* Set the transfer direction; in this case set both flags.
         * Also set the BD_WRAP_BACK flag; the firmware will take care
         * of assigning DID=SID for outgoing packets.
         */
3762         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3763         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3764         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3765                                                         BD_WRAP_BACK);
3766
3767         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3768         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3769         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3770         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3771
3772         vha->bidi_stats.transfer_bytes += req_data_len;
3773         vha->bidi_stats.io_count++;
3774
3775         vha->qla_stats.output_bytes += req_data_len;
3776         vha->qla_stats.output_requests++;
3777
        /* Only one DSD is available inline in the bidirectional IOCB;
         * the remaining DSDs are bundled in continuation IOCBs.
         */
3781         avail_dsds = 1;
3782         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3783
3784         index = 0;
3785
3786         for_each_sg(bsg_job->request_payload.sg_list, sg,
3787                                 bsg_job->request_payload.sg_cnt, index) {
3788                 dma_addr_t sle_dma;
3789                 cont_a64_entry_t *cont_pkt;
3790
3791                 /* Allocate additional continuation packets */
3792                 if (avail_dsds == 0) {
                        /* A Continuation Type 1 IOCB can accommodate
                         * five DSDs.
                         */
3796                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3797                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3798                         avail_dsds = 5;
3799                         entry_count++;
3800                 }
3801                 sle_dma = sg_dma_address(sg);
3802                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3803                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3804                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3805                 avail_dsds--;
3806         }
        /* For a read request the DSDs always go to a continuation IOCB,
         * following the write DSDs. If there is room on the current IOCB
         * they are added there; otherwise a new continuation IOCB is
         * allocated.
         */
3812         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3813                                 bsg_job->reply_payload.sg_cnt, index) {
3814                 dma_addr_t sle_dma;
3815                 cont_a64_entry_t *cont_pkt;
3816
3817                 /* Allocate additional continuation packets */
3818                 if (avail_dsds == 0) {
                        /* A Continuation Type 1 IOCB can accommodate
                         * five DSDs.
                         */
3822                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3823                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3824                         avail_dsds = 5;
3825                         entry_count++;
3826                 }
3827                 sle_dma = sg_dma_address(sg);
3828                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3829                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3830                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3831                 avail_dsds--;
3832         }
        /* This value should equal the number of IOCBs required for this cmd */
3834         cmd_pkt->entry_count = entry_count;
3835 }
3836
3837 int
3838 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3839 {
3841         struct qla_hw_data *ha = vha->hw;
3842         unsigned long flags;
3843         uint32_t handle;
3844         uint32_t index;
3845         uint16_t req_cnt;
3846         uint16_t cnt;
3847         uint32_t *clr_ptr;
3848         struct cmd_bidir *cmd_pkt = NULL;
3849         struct rsp_que *rsp;
3850         struct req_que *req;
        int rval = EXT_STATUS_OK;
3854
3855         rsp = ha->rsp_q_map[0];
3856         req = vha->req;
3857
3858         /* Send marker if required */
3859         if (vha->marker_needed != 0) {
3860                 if (qla2x00_marker(vha, req,
3861                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3862                         return EXT_STATUS_MAILBOX;
3863                 vha->marker_needed = 0;
3864         }
3865
3866         /* Acquire ring specific lock */
3867         spin_lock_irqsave(&ha->hardware_lock, flags);
3868
3869         /* Check for room in outstanding command list. */
3870         handle = req->current_outstanding_cmd;
3871         for (index = 1; index < req->num_outstanding_cmds; index++) {
3872                 handle++;
3873                 if (handle == req->num_outstanding_cmds)
3874                         handle = 1;
3875                 if (!req->outstanding_cmds[handle])
3876                         break;
3877         }
3878
3879         if (index == req->num_outstanding_cmds) {
3880                 rval = EXT_STATUS_BUSY;
3881                 goto queuing_error;
3882         }
3883
3884         /* Calculate number of IOCB required */
3885         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3886
3887         /* Check for room on request queue. */
3888         if (req->cnt < req_cnt + 2) {
3889                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3890                     RD_REG_DWORD_RELAXED(req->req_q_out);
                if (req->ring_index < cnt)
3892                         req->cnt = cnt - req->ring_index;
3893                 else
3894                         req->cnt = req->length -
3895                                 (req->ring_index - cnt);
3896         }
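        /*
         * Worked example: with req->length = 2048, ring_index = 2000 and
         * a firmware out pointer of 100, the free count computed above is
         * 2048 - (2000 - 100) = 148 entries.
         */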
3897         if (req->cnt < req_cnt + 2) {
3898                 rval = EXT_STATUS_BUSY;
3899                 goto queuing_error;
3900         }
3901
3902         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3903         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3904
3905         /* Zero out remaining portion of packet. */
3906         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3907         clr_ptr = (uint32_t *)cmd_pkt + 2;
3908         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3909
3910         /* Set NPORT-ID  (of vha)*/
3911         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3912         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3913         cmd_pkt->port_id[1] = vha->d_id.b.area;
3914         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3915
3916         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3917         cmd_pkt->entry_status = (uint8_t) rsp->id;
3918         /* Build command packet. */
3919         req->current_outstanding_cmd = handle;
3920         req->outstanding_cmds[handle] = sp;
3921         sp->handle = handle;
3922         req->cnt -= req_cnt;
3923
3924         /* Send the command to the firmware */
3925         wmb();
3926         qla2x00_start_iocbs(vha, req);
3927 queuing_error:
3928         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3929         return rval;
3930 }