/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
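
/*
 * Worked example (illustrative only, not part of the driver): the
 * Command Type 2 IOCB holds 3 DSDs and each Continuation Type 0 IOCB
 * holds 7 more, so dsds = 10 gives 1 + (10 - 3) / 7 = 2 IOCBs with no
 * remainder, while dsds = 11 leaves (11 - 3) % 7 = 1 and needs a third.
 */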

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
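
/*
 * Worked example (illustrative only, not part of the driver): the
 * command IOCB holds 2 DSDs and each Continuation Type 1 IOCB holds
 * 5 more, so dsds = 12 gives 1 + (12 - 2) / 5 = 3 IOCBs, while
 * dsds = 13 picks up one more from the remainder, for 4 total.
 */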

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
                           CONTINUE_A64_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd32    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
        cur_dsd = cmd_pkt->dsd32;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd32(&cur_dsd, sg);
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
        cur_dsd = cmd_pkt->dsd64;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
        uint32_t index, handle = req->current_outstanding_cmd;

        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        return handle;
        }

        return 0;
}
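
/*
 * Illustrative walk-through (example values, not driver code): with
 * num_outstanding_cmds = 4 and current_outstanding_cmd = 3, the loop
 * above probes handles 1, 2 and 3 in that order (handle 0 is never
 * used; the increment wraps 4 back to 1), returning the first free
 * slot, or 0 if every slot is busy.
 */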

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else {
                nseg = 0;
        }

        tot_dsds = nseg;

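        /*
         * Illustrative ring-space math for the headroom check below
         * (example values, not driver code): the request ring is
         * circular, so with req->length = 128, ring_index = 120 and a
         * hardware out-pointer cnt = 8, the free space is
         * 128 - (120 - 8) = 16 entries; with cnt = 125 it is simply
         * 125 - 120 = 5. Two entries are always held in reserve.
         */
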
        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number. */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else {
                        req->ring_ptr++;
                }

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct req_que *req = qpair->req;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        struct dsd64 *cur_dsd = NULL, *next_dsd;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = sp->u.scmd.ct6_ctx;

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cmd_pkt->fcp_dsd.address);
                        cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
                } else {
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd++;
                }
                cur_dsd = next_dsd;
                while (avail_dsds) {
                        append_dsd64(&cur_dsd, cur_seg);
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}
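
/*
 * Layout sketch of the chained DSD lists built above (illustrative
 * only; list capacity is QLA_DSDS_PER_IOCB entries):
 *
 *   cmd_pkt->fcp_dsd --> list 0: [dsd 0]...[dsd n][link to list 1]
 *                        list 1: [dsd 0]...[dsd m][null terminator]
 *
 * Each list carries up to QLA_DSDS_PER_IOCB data segments plus one
 * trailing slot that either points at the next list or, on the last
 * list, holds the zeroed terminator written above.
 */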

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
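
/*
 * Worked example (illustrative only; assumes QLA_DSDS_PER_IOCB is 37,
 * its usual definition in qla_def.h): this is plain ceiling division,
 * so dsds = 37 needs one list, while dsds = 38 needs 38 / 37 = 1 plus
 * one more for the remainder, i.e. two lists.
 */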

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->dsd;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For Type 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
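
/*
 * Illustrative walk-through of the iterator above (example values,
 * not driver code): with blk_sz = 512 and two SG elements of 768 and
 * 256 bytes, successive calls yield 512 bytes (*partial = 0), the
 * 256-byte tail of the first element (*partial = 1), and finally the
 * 256 bytes from the second element that complete the block
 * (*partial = 0). Callers therefore see every protection-interval
 * boundary even when an interval spans SG elements.
 */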

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        struct dsd64 *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                put_unaligned_le64(sle_dma, &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(sle_dma_len);
                cur_dsd++;
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        struct dsd64 *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
        struct scatterlist *sg, *sgl;
        struct crc_context *difctx = NULL;
        struct scsi_qla_host *vha;
        uint dsd_list_len;
        uint avail_dsds = 0;
        uint used_dsds = tot_dsds;
        bool dif_local_dma_alloc = false;
        bool direction_to_device = false;
        int i;

        if (sp) {
                struct scsi_cmnd *cmd = GET_CMD_SP(sp);

                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
                difctx = sp->u.scmd.crc_ctx;
                direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
                    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
                    __func__, cmd, difctx, sp);
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
                difctx = tc->ctx;
                direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
            "%s: enter (write=%u)\n", __func__, direction_to_device);

        /* if initiator doing write or target doing read */
        if (direction_to_device) {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        u64 sle_phys = sg_phys(sg);

                        /* If SGE addr + len flips bits in upper 32-bits */
                        if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
                                    "%s: page boundary crossing (phys=%llx len=%x)\n",
                                    __func__, sle_phys, sg->length);

                                if (difctx) {
                                        ha->dif_bundle_crossed_pages++;
                                        dif_local_dma_alloc = true;
                                } else {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe022,
                                            "%s: difctx pointer is NULL\n",
                                            __func__);
                                }
                                break;
                        }
                }
                ha->dif_bundle_writes++;
        } else {
                ha->dif_bundle_reads++;
        }

        if (ql2xdifbundlinginternalbuffers)
                dif_local_dma_alloc = direction_to_device;

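        /*
         * Illustrative note on the local-DMA bundling path below
         * (example figures, not driver code): on the write path the
         * protection scatterlist is copied piecewise into driver-owned
         * buffers of DIF_BUNDLING_DMA_POOL_SIZE bytes. If that size
         * were 512, a 1400-byte protection list would fill two full
         * buffers plus part of a third (no_dif_bundl = 3,
         * dif_bundl_len = 1400); the buffers are then chained into DSD
         * lists just like ordinary data segments.
         */
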
        if (dif_local_dma_alloc) {
                u32 track_difbundl_buf = 0;
                u32 ldma_sg_len = 0;
                u8 ldma_needed = 1;

                difctx->no_dif_bundl = 0;
                difctx->dif_bundl_len = 0;

                /* Track DSD buffers */
                INIT_LIST_HEAD(&difctx->ldif_dsd_list);
                /* Track local DMA buffers */
                INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

                for_each_sg(sgl, sg, tot_dsds, i) {
                        u32 sglen = sg_dma_len(sg);

                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
                            "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
                            __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
                            difctx->dif_bundl_len, ldma_needed);

                        while (sglen) {
                                u32 xfrlen = 0;

                                if (ldma_needed) {
                                        /*
                                         * Allocate list item to store
                                         * the DMA buffers
                                         */
                                        dsd_ptr = kzalloc(sizeof(*dsd_ptr),
                                            GFP_ATOMIC);
                                        if (!dsd_ptr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc dsd_ptr\n",
                                                    __func__);
                                                return 1;
                                        }
                                        ha->dif_bundle_kallocs++;

                                        /* allocate dma buffer */
                                        dsd_ptr->dsd_addr = dma_pool_alloc
                                                (ha->dif_bundl_pool, GFP_ATOMIC,
                                                 &dsd_ptr->dsd_list_dma);
                                        if (!dsd_ptr->dsd_addr) {
                                                ql_dbg(ql_dbg_tgt, vha, 0xe024,
                                                    "%s: failed alloc ->dsd_addr\n",
                                                    __func__);
                                                /*
                                                 * need to cleanup only this
                                                 * dsd_ptr rest will be done
                                                 * by sp_free_dma()
                                                 */
                                                kfree(dsd_ptr);
                                                ha->dif_bundle_kallocs--;
                                                return 1;
                                        }
                                        ha->dif_bundle_dma_allocs++;
                                        ldma_needed = 0;
                                        difctx->no_dif_bundl++;
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dma_hndl_list);
                                }

                                /* xfrlen is min of dma pool size and sglen */
                                xfrlen = (sglen >
                                   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
                                    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
                                    sglen;

                                /* replace with local allocated dma buffer */
                                sg_pcopy_to_buffer(sgl, sg_nents(sgl),
                                    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
                                    difctx->dif_bundl_len);
                                difctx->dif_bundl_len += xfrlen;
                                sglen -= xfrlen;
                                ldma_sg_len += xfrlen;
                                if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
                                    sg_is_last(sg)) {
                                        ldma_needed = 1;
                                        ldma_sg_len = 0;
                                }
                        }
                }

                track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
                    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
                    difctx->dif_bundl_len, difctx->no_dif_bundl,
                    track_difbundl_buf);

                if (sp)
                        sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
                else
                        tc->prot_flags = DIF_BUNDL_DMA_VALID;

                list_for_each_entry_safe(dif_dsd, nxt_dsd,
                    &difctx->ldif_dma_hndl_list, list) {
                        u32 sglen = (difctx->dif_bundl_len >
                            DIF_BUNDLING_DMA_POOL_SIZE) ?
                            DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

                        BUG_ON(track_difbundl_buf == 0);

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
                                    0xe024,
                                    "%s: adding continuation iocb's\n",
                                    __func__);
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc dsd_ptr\n",
                                            __func__);
                                        return 1;
                                }
                                ha->dif_bundle_kallocs++;

                                difctx->no_ldif_dsd++;
                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        ql_dbg(ql_dbg_tgt, vha, 0xe026,
                                            "%s: failed alloc ->dsd_addr\n",
                                            __func__);
                                        /*
                                         * need to cleanup only this dsd_ptr;
                                         * rest will be done by sp_free_dma()
                                         */
                                        kfree(dsd_ptr);
                                        ha->dif_bundle_kallocs--;
                                        return 1;
                                }
                                ha->dif_bundle_dma_allocs++;

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->ldif_dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        put_unaligned_le64(dif_dsd->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(sglen);
                        cur_dsd++;
                        avail_dsds--;
                        difctx->dif_bundl_len -= sglen;
                        track_difbundl_buf--;
                }

                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
                    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
                    difctx->no_ldif_dsd, difctx->no_dif_bundl);
        } else {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                    QLA_DSDS_PER_IOCB : used_dsds;
                                dsd_list_len = (avail_dsds + 1) * 12;
                                used_dsds -= avail_dsds;

                                /* allocate tracking DS */
                                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                                if (!dsd_ptr) {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe027,
                                            "%s: failed alloc dsd_dma...\n",
                                            __func__);
                                        return 1;
                                }

                                /* allocate new list */
                                dsd_ptr->dsd_addr =
                                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                        &dsd_ptr->dsd_list_dma);
                                if (!dsd_ptr->dsd_addr) {
                                        /*
                                         * need to cleanup only this dsd_ptr;
                                         * rest will be done by sp_free_dma()
                                         */
                                        kfree(dsd_ptr);
                                        return 1;
                                }

                                if (sp) {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        sp->flags |= SRB_CRC_CTX_DSD_VALID;
                                } else {
                                        list_add_tail(&dsd_ptr->list,
                                            &difctx->dsd_list);
                                        tc->ctx_dsd_alloced = 1;
                                }

                                /* add new list to cmd iocb or last list */
                                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                                   &cur_dsd->address);
                                cur_dsd->length = cpu_to_le32(dsd_list_len);
                                cur_dsd = dsd_ptr->dsd_addr;
                        }
                        append_dsd64(&cur_dsd, sg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}
1376
1377 /**
1378  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1379  *                                                      Type 6 IOCB types.
1380  *
1381  * @sp: SRB command to process
1382  * @cmd_pkt: Command type 3 IOCB
1383  * @tot_dsds: Total number of segments to transfer
1384  * @tot_prot_dsds: Total number of segments with protection information
1385  * @fw_prot_opts: Protection options to be passed to firmware
1386  */
1387 static inline int
1388 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1389     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1390 {
1391         struct dsd64            *cur_dsd;
1392         uint32_t                *fcp_dl;
1393         scsi_qla_host_t         *vha;
1394         struct scsi_cmnd        *cmd;
1395         uint32_t                total_bytes = 0;
1396         uint32_t                data_bytes;
1397         uint32_t                dif_bytes;
1398         uint8_t                 bundling = 1;
1399         uint16_t                blk_size;
1400         struct crc_context      *crc_ctx_pkt = NULL;
1401         struct qla_hw_data      *ha;
1402         uint8_t                 additional_fcpcdb_len;
1403         uint16_t                fcp_cmnd_len;
1404         struct fcp_cmnd         *fcp_cmnd;
1405         dma_addr_t              crc_ctx_dma;
1406
1407         cmd = GET_CMD_SP(sp);
1408
1409         /* Update entry type to indicate Command Type CRC_2 IOCB */
1410         put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1411
1412         vha = sp->vha;
1413         ha = vha->hw;
1414
1415         /* No data transfer */
1416         data_bytes = scsi_bufflen(cmd);
1417         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1418                 cmd_pkt->byte_count = cpu_to_le32(0);
1419                 return QLA_SUCCESS;
1420         }
1421
1422         cmd_pkt->vp_index = sp->vha->vp_idx;
1423
1424         /* Set transfer direction */
1425         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1426                 cmd_pkt->control_flags =
1427                     cpu_to_le16(CF_WRITE_DATA);
1428         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1429                 cmd_pkt->control_flags =
1430                     cpu_to_le16(CF_READ_DATA);
1431         }
1432
1433         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1434             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1435             (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1436             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1437                 bundling = 0;
1438
1439         /* Allocate CRC context from global pool */
1440         crc_ctx_pkt = sp->u.scmd.crc_ctx =
1441             dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1442
1443         if (!crc_ctx_pkt)
1444                 goto crc_queuing_error;
1445
1446         crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1447
1448         sp->flags |= SRB_CRC_CTX_DMA_VALID;
1449
1450         /* Set handle */
1451         crc_ctx_pkt->handle = cmd_pkt->handle;
1452
1453         INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1454
1455         qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1456             &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1457
1458         put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1459         cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1460
1461         /* Determine SCSI command length -- align to 4 byte boundary */
1462         if (cmd->cmd_len > 16) {
1463                 additional_fcpcdb_len = cmd->cmd_len - 16;
1464                 if ((cmd->cmd_len % 4) != 0) {
1465                         /* SCSI cmd > 16 bytes must be multiple of 4 */
1466                         goto crc_queuing_error;
1467                 }
1468                 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1469         } else {
1470                 additional_fcpcdb_len = 0;
1471                 fcp_cmnd_len = 12 + 16 + 4;
1472         }
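
        /*
         * Worked example (illustrative): fcp_cmnd_len covers the FCP_CMND
         * payload -- 12 bytes of fixed header ahead of the CDB, the CDB
         * itself, and the 4-byte FCP_DL that follows it.  A 32-byte CDB
         * gives additional_fcpcdb_len = 16 and fcp_cmnd_len = 12 + 32 + 4
         * = 48; a standard 16-byte (or shorter) CDB always yields 32.
         */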
1473
1474         fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1475
1476         fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1477         if (cmd->sc_data_direction == DMA_TO_DEVICE)
1478                 fcp_cmnd->additional_cdb_len |= 1;
1479         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1480                 fcp_cmnd->additional_cdb_len |= 2;
1481
1482         int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1483         memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1484         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1485         put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1486                            &cmd_pkt->fcp_cmnd_dseg_address);
1487         fcp_cmnd->task_management = 0;
1488         fcp_cmnd->task_attribute = TSK_SIMPLE;
1489
1490         cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1491
1492         /* Compute DIF length and adjust data length to include protection */
1493         dif_bytes = 0;
1494         blk_size = cmd->device->sector_size;
1495         dif_bytes = (data_bytes / blk_size) * 8;
1496
1497         switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1498         case SCSI_PROT_READ_INSERT:
1499         case SCSI_PROT_WRITE_STRIP:
1500                 total_bytes = data_bytes;
1501                 data_bytes += dif_bytes;
1502                 break;
1503
1504         case SCSI_PROT_READ_STRIP:
1505         case SCSI_PROT_WRITE_INSERT:
1506         case SCSI_PROT_READ_PASS:
1507         case SCSI_PROT_WRITE_PASS:
1508                 total_bytes = data_bytes + dif_bytes;
1509                 break;
1510         default:
1511                 BUG();
1512         }
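
        /*
         * Worked example (illustrative): a 16 KiB transfer on a 512-byte
         * sector device spans 32 blocks, so dif_bytes = 32 * 8 = 256.
         * READ_INSERT/WRITE_STRIP carry no PI on the wire, so total_bytes
         * (the FCP_DL value) stays 16384 while the host-side DMA length,
         * data_bytes, grows to 16640; in the remaining cases the PI is on
         * the wire and total_bytes itself becomes 16640.
         */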
1513
1514         if (!qla2x00_hba_err_chk_enabled(sp))
1515                 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1516         else if (IS_PI_UNINIT_CAPABLE(ha)) {
1517                 /* HBA error checking enabled */
1518                 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1519                     || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1520                         SCSI_PROT_DIF_TYPE2))
1521                         fw_prot_opts |= BIT_10;
1522                 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1523                     SCSI_PROT_DIF_TYPE3)
1524                         fw_prot_opts |= BIT_11;
1525         }
1526
1527         if (!bundling) {
1528                 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1529         } else {
1530                 /*
1531                  * Configure bundling if protection data must be fetched
1532                  * with interleaving PCI accesses
1533                  */
1534                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1535                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1536                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1537                                                         tot_prot_dsds);
1538                 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1539         }
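
        /*
         * Annotation (hedged): with bundling, only the data descriptors
         * are built from this point; the protection descriptors are
         * supplied separately through u.bundling.dif_dsd further below,
         * together with CF_DIF_SEG_DESCR_ENABLE, so the firmware can
         * fetch data and PI as two distinct streams.
         */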
1540
1541         /* Finish the common fields of CRC pkt */
1542         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1543         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1544         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1545         crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1546         /* Fibre channel byte count */
1547         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1548         fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1549             additional_fcpcdb_len);
1550         *fcp_dl = htonl(total_bytes);
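
        /*
         * Note: FCP_DL sits immediately after the (possibly extended) CDB
         * inside the FCP_CMND payload built above, hence the cdb + 16 +
         * additional_fcpcdb_len arithmetic; htonl() is used because the
         * FCP payload is big-endian, unlike the little-endian IOCB fields.
         */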
1551
1552         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1553                 cmd_pkt->byte_count = cpu_to_le32(0);
1554                 return QLA_SUCCESS;
1555         }
1556
1557         /* Walk the data segments */
1558         cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1559
1560         if (!bundling && tot_prot_dsds) {
1561                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1562                         cur_dsd, tot_dsds, NULL))
1563                         goto crc_queuing_error;
1564         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1565                         (tot_dsds - tot_prot_dsds), NULL))
1566                 goto crc_queuing_error;
1567
1568         if (bundling && tot_prot_dsds) {
1569                 /* Walks dif segments */
1570                 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1571                 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1572                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1573                                 tot_prot_dsds, NULL))
1574                         goto crc_queuing_error;
1575         }
1576         return QLA_SUCCESS;
1577
1578 crc_queuing_error:
1579         /* Cleanup will be performed by the caller */
1580
1581         return QLA_FUNCTION_FAILED;
1582 }
1583
1584 /**
1585  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1586  * @sp: command to send to the ISP
1587  *
1588  * Returns non-zero if a failure occurred, else zero.
1589  */
1590 int
1591 qla24xx_start_scsi(srb_t *sp)
1592 {
1593         int             nseg;
1594         unsigned long   flags;
1595         uint32_t        *clr_ptr;
1596         uint32_t        handle;
1597         struct cmd_type_7 *cmd_pkt;
1598         uint16_t        cnt;
1599         uint16_t        req_cnt;
1600         uint16_t        tot_dsds;
1601         struct req_que *req = NULL;
1602         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1603         struct scsi_qla_host *vha = sp->vha;
1604         struct qla_hw_data *ha = vha->hw;
1605
1606         /* Setup device pointers. */
1607         req = vha->req;
1608
1609         /* So we know we haven't pci_map'ed anything yet */
1610         tot_dsds = 0;
1611
1612         /* Send marker if required */
1613         if (vha->marker_needed != 0) {
1614                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1615                     QLA_SUCCESS)
1616                         return QLA_FUNCTION_FAILED;
1617                 vha->marker_needed = 0;
1618         }
1619
1620         /* Acquire ring specific lock */
1621         spin_lock_irqsave(&ha->hardware_lock, flags);
1622
1623         handle = qla2xxx_get_next_handle(req);
1624         if (handle == 0)
1625                 goto queuing_error;
1626
1627         /* Map the sg table so we have an accurate count of sg entries needed */
1628         if (scsi_sg_count(cmd)) {
1629                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1630                     scsi_sg_count(cmd), cmd->sc_data_direction);
1631                 if (unlikely(!nseg))
1632                         goto queuing_error;
1633         } else
1634                 nseg = 0;
1635
1636         tot_dsds = nseg;
1637         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1638         if (req->cnt < (req_cnt + 2)) {
1639                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1640                     RD_REG_DWORD_RELAXED(req->req_q_out);
1641                 if (req->ring_index < cnt)
1642                         req->cnt = cnt - req->ring_index;
1643                 else
1644                         req->cnt = req->length -
1645                                 (req->ring_index - cnt);
1646                 if (req->cnt < (req_cnt + 2))
1647                         goto queuing_error;
1648         }
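
        /*
         * Worked example (illustrative): the request ring is circular.
         * With req->length = 2048, ring_index = 10 and hardware out
         * pointer cnt = 5, the recomputed free count is 2048 - (10 - 5)
         * = 2043 entries.  Two slots of headroom are kept above req_cnt,
         * presumably so the ring can never become completely full and
         * the in/out pointers stay unambiguous.
         */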
1649
1650         /* Build command packet. */
1651         req->current_outstanding_cmd = handle;
1652         req->outstanding_cmds[handle] = sp;
1653         sp->handle = handle;
1654         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1655         req->cnt -= req_cnt;
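
        /*
         * Annotation: `handle` indexes req->outstanding_cmds[], which is
         * how the completion path maps a returned IOCB handle back to
         * this srb; host_scribble carries the same handle on the
         * scsi_cmnd (hedged summary of usage elsewhere in the driver).
         */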
1656
1657         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1658         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1659
1660         /* Zero out remaining portion of packet. */
1661         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1662         clr_ptr = (uint32_t *)cmd_pkt + 2;
1663         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1664         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1665
1666         /* Set NPORT-ID and LUN number*/
1667         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1668         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1669         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1670         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1671         cmd_pkt->vp_index = sp->vha->vp_idx;
1672
1673         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1674         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1675
1676         cmd_pkt->task = TSK_SIMPLE;
1677
1678         /* Load SCSI command packet. */
1679         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1680         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1681
1682         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1683
1684         /* Build IOCB segments */
1685         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1686
1687         /* Set total data segment count. */
1688         cmd_pkt->entry_count = (uint8_t)req_cnt;
1689         wmb();
1690         /* Adjust ring index. */
1691         req->ring_index++;
1692         if (req->ring_index == req->length) {
1693                 req->ring_index = 0;
1694                 req->ring_ptr = req->ring;
1695         } else
1696                 req->ring_ptr++;
1697
1698         sp->flags |= SRB_DMA_VALID;
1699
1700         /* Set chip new ring index. */
1701         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1702
1703         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1704         return QLA_SUCCESS;
1705
1706 queuing_error:
1707         if (tot_dsds)
1708                 scsi_dma_unmap(cmd);
1709
1710         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1711
1712         return QLA_FUNCTION_FAILED;
1713 }
1714
1715 /**
1716  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1717  * @sp: command to send to the ISP
1718  *
1719  * Returns non-zero if a failure occurred, else zero.
1720  */
1721 int
1722 qla24xx_dif_start_scsi(srb_t *sp)
1723 {
1724         int                     nseg;
1725         unsigned long           flags;
1726         uint32_t                *clr_ptr;
1727         uint32_t                handle;
1728         uint16_t                cnt;
1729         uint16_t                req_cnt = 0;
1730         uint16_t                tot_dsds;
1731         uint16_t                tot_prot_dsds;
1732         uint16_t                fw_prot_opts = 0;
1733         struct req_que          *req = NULL;
1734         struct rsp_que          *rsp = NULL;
1735         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1736         struct scsi_qla_host    *vha = sp->vha;
1737         struct qla_hw_data      *ha = vha->hw;
1738         struct cmd_type_crc_2   *cmd_pkt;
1739         uint32_t                status = 0;
1740
1741 #define QDSS_GOT_Q_SPACE        BIT_0
1742
1743         /* Only process protection ops or CDBs longer than 16 bytes here */
1744         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1745                 if (cmd->cmd_len <= 16)
1746                         return qla24xx_start_scsi(sp);
1747         }
1748
1749         /* Setup device pointers. */
1750         req = vha->req;
1751         rsp = req->rsp;
1752
1753         /* So we know we haven't pci_map'ed anything yet */
1754         tot_dsds = 0;
1755
1756         /* Send marker if required */
1757         if (vha->marker_needed != 0) {
1758                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1759                     QLA_SUCCESS)
1760                         return QLA_FUNCTION_FAILED;
1761                 vha->marker_needed = 0;
1762         }
1763
1764         /* Acquire ring specific lock */
1765         spin_lock_irqsave(&ha->hardware_lock, flags);
1766
1767         handle = qla2xxx_get_next_handle(req);
1768         if (handle == 0)
1769                 goto queuing_error;
1770
1771         /* Compute number of required data segments */
1772         /* Map the sg table so we have an accurate count of sg entries needed */
1773         if (scsi_sg_count(cmd)) {
1774                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1775                     scsi_sg_count(cmd), cmd->sc_data_direction);
1776                 if (unlikely(!nseg))
1777                         goto queuing_error;
1778                 else
1779                         sp->flags |= SRB_DMA_VALID;
1780
1781                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1782                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1783                         struct qla2_sgx sgx;
1784                         uint32_t        partial;
1785
1786                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1787                         sgx.tot_bytes = scsi_bufflen(cmd);
1788                         sgx.cur_sg = scsi_sglist(cmd);
1789                         sgx.sp = sp;
1790
1791                         nseg = 0;
1792                         while (qla24xx_get_one_block_sg(
1793                             cmd->device->sector_size, &sgx, &partial))
1794                                 nseg++;
1795                 }
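
                /*
                 * Annotation (hedged): for READ_INSERT/WRITE_STRIP the
                 * host buffer and the wire differ by 8 bytes of PI per
                 * block, so the mapped scatterlist is re-walked one
                 * logical block at a time and nseg becomes the number of
                 * block-sized pieces rather than the sg element count.
                 */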
1796         } else
1797                 nseg = 0;
1798
1799         /* number of required data segments */
1800         tot_dsds = nseg;
1801
1802         /* Compute number of required protection segments */
1803         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1804                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1805                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1806                 if (unlikely(!nseg))
1807                         goto queuing_error;
1808                 else
1809                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1810
1811                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1812                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1813                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1814                 }
1815         } else {
1816                 nseg = 0;
1817         }
1818
1819         req_cnt = 1;
1820         /* Total Data and protection sg segment(s) */
1821         tot_prot_dsds = nseg;
1822         tot_dsds += nseg;
1823         if (req->cnt < (req_cnt + 2)) {
1824                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1825                     RD_REG_DWORD_RELAXED(req->req_q_out);
1826                 if (req->ring_index < cnt)
1827                         req->cnt = cnt - req->ring_index;
1828                 else
1829                         req->cnt = req->length -
1830                                 (req->ring_index - cnt);
1831                 if (req->cnt < (req_cnt + 2))
1832                         goto queuing_error;
1833         }
1834
1835         status |= QDSS_GOT_Q_SPACE;
1836
1837         /* Build header part of command packet (excluding the OPCODE). */
1838         req->current_outstanding_cmd = handle;
1839         req->outstanding_cmds[handle] = sp;
1840         sp->handle = handle;
1841         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1842         req->cnt -= req_cnt;
1843
1844         /* Fill-in common area */
1845         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1846         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1847
1848         clr_ptr = (uint32_t *)cmd_pkt + 2;
1849         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1850
1851         /* Set NPORT-ID and LUN number*/
1852         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1853         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1854         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1855         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1856
1857         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1858         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1859
1860         /* Total Data and protection segment(s) */
1861         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1862
1863         /* Build IOCB segments and adjust for data protection segments */
1864         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1865             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1866                 QLA_SUCCESS)
1867                 goto queuing_error;
1868
1869         cmd_pkt->entry_count = (uint8_t)req_cnt;
1870         /* Specify response queue number where completion should happen */
1871         cmd_pkt->entry_status = (uint8_t) rsp->id;
1872         cmd_pkt->timeout = cpu_to_le16(0);
1873         wmb();
1874
1875         /* Adjust ring index. */
1876         req->ring_index++;
1877         if (req->ring_index == req->length) {
1878                 req->ring_index = 0;
1879                 req->ring_ptr = req->ring;
1880         } else
1881                 req->ring_ptr++;
1882
1883         /* Set chip new ring index. */
1884         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1885
1886         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1887
1888         return QLA_SUCCESS;
1889
1890 queuing_error:
1891         if (status & QDSS_GOT_Q_SPACE) {
1892                 req->outstanding_cmds[handle] = NULL;
1893                 req->cnt += req_cnt;
1894         }
1895         /* Cleanup will be performed by the caller (queuecommand) */
1896
1897         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1898         return QLA_FUNCTION_FAILED;
1899 }
1900
1901 /**
1902  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1903  * @sp: command to send to the ISP
1904  *
1905  * Returns non-zero if a failure occurred, else zero.
1906  */
1907 static int
1908 qla2xxx_start_scsi_mq(srb_t *sp)
1909 {
1910         int             nseg;
1911         unsigned long   flags;
1912         uint32_t        *clr_ptr;
1913         uint32_t        handle;
1914         struct cmd_type_7 *cmd_pkt;
1915         uint16_t        cnt;
1916         uint16_t        req_cnt;
1917         uint16_t        tot_dsds;
1918         struct req_que *req = NULL;
1919         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1920         struct scsi_qla_host *vha = sp->fcport->vha;
1921         struct qla_hw_data *ha = vha->hw;
1922         struct qla_qpair *qpair = sp->qpair;
1923
1924         /* Acquire qpair specific lock */
1925         spin_lock_irqsave(&qpair->qp_lock, flags);
1926
1927         /* Setup qpair pointers */
1928         req = qpair->req;
1929
1930         /* So we know we haven't pci_map'ed anything yet */
1931         tot_dsds = 0;
1932
1933         /* Send marker if required */
1934         if (vha->marker_needed != 0) {
1935                 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1936                     QLA_SUCCESS) {
1937                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1938                         return QLA_FUNCTION_FAILED;
1939                 }
1940                 vha->marker_needed = 0;
1941         }
1942
1943         handle = qla2xxx_get_next_handle(req);
1944         if (handle == 0)
1945                 goto queuing_error;
1946
1947         /* Map the sg table so we have an accurate count of sg entries needed */
1948         if (scsi_sg_count(cmd)) {
1949                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1950                     scsi_sg_count(cmd), cmd->sc_data_direction);
1951                 if (unlikely(!nseg))
1952                         goto queuing_error;
1953         } else
1954                 nseg = 0;
1955
1956         tot_dsds = nseg;
1957         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1958         if (req->cnt < (req_cnt + 2)) {
1959                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1960                     RD_REG_DWORD_RELAXED(req->req_q_out);
1961                 if (req->ring_index < cnt)
1962                         req->cnt = cnt - req->ring_index;
1963                 else
1964                         req->cnt = req->length -
1965                                 (req->ring_index - cnt);
1966                 if (req->cnt < (req_cnt + 2))
1967                         goto queuing_error;
1968         }
1969
1970         /* Build command packet. */
1971         req->current_outstanding_cmd = handle;
1972         req->outstanding_cmds[handle] = sp;
1973         sp->handle = handle;
1974         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1975         req->cnt -= req_cnt;
1976
1977         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1978         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1979
1980         /* Zero out remaining portion of packet. */
1981         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1982         clr_ptr = (uint32_t *)cmd_pkt + 2;
1983         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1984         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1985
1986         /* Set NPORT-ID and LUN number*/
1987         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1988         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1989         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1990         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1991         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1992
1993         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1994         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1995
1996         cmd_pkt->task = TSK_SIMPLE;
1997
1998         /* Load SCSI command packet. */
1999         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2000         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2001
2002         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2003
2004         /* Build IOCB segments */
2005         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2006
2007         /* Set total data segment count. */
2008         cmd_pkt->entry_count = (uint8_t)req_cnt;
2009         wmb();
2010         /* Adjust ring index. */
2011         req->ring_index++;
2012         if (req->ring_index == req->length) {
2013                 req->ring_index = 0;
2014                 req->ring_ptr = req->ring;
2015         } else
2016                 req->ring_ptr++;
2017
2018         sp->flags |= SRB_DMA_VALID;
2019
2020         /* Set chip new ring index. */
2021         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2022
2023         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2024         return QLA_SUCCESS;
2025
2026 queuing_error:
2027         if (tot_dsds)
2028                 scsi_dma_unmap(cmd);
2029
2030         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2031
2032         return QLA_FUNCTION_FAILED;
2033 }
2034
2036 /**
2037  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2038  * @sp: command to send to the ISP
2039  *
2040  * Returns non-zero if a failure occurred, else zero.
2041  */
2042 int
2043 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2044 {
2045         int                     nseg;
2046         unsigned long           flags;
2047         uint32_t                *clr_ptr;
2048         uint32_t                handle;
2049         uint16_t                cnt;
2050         uint16_t                req_cnt = 0;
2051         uint16_t                tot_dsds;
2052         uint16_t                tot_prot_dsds;
2053         uint16_t                fw_prot_opts = 0;
2054         struct req_que          *req = NULL;
2055         struct rsp_que          *rsp = NULL;
2056         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2057         struct scsi_qla_host    *vha = sp->fcport->vha;
2058         struct qla_hw_data      *ha = vha->hw;
2059         struct cmd_type_crc_2   *cmd_pkt;
2060         uint32_t                status = 0;
2061         struct qla_qpair        *qpair = sp->qpair;
2062
2063 #define QDSS_GOT_Q_SPACE        BIT_0
2064
2065         /* Check for host side state */
2066         if (!qpair->online) {
2067                 cmd->result = DID_NO_CONNECT << 16;
2068                 return QLA_INTERFACE_ERROR;
2069         }
2070
2071         if (!qpair->difdix_supported &&
2072                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2073                 cmd->result = DID_NO_CONNECT << 16;
2074                 return QLA_INTERFACE_ERROR;
2075         }
2076
2077         /* Only process protection ops or CDBs longer than 16 bytes here */
2078         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2079                 if (cmd->cmd_len <= 16)
2080                         return qla2xxx_start_scsi_mq(sp);
2081         }
2082
2083         spin_lock_irqsave(&qpair->qp_lock, flags);
2084
2085         /* Setup qpair pointers */
2086         rsp = qpair->rsp;
2087         req = qpair->req;
2088
2089         /* So we know we haven't pci_map'ed anything yet */
2090         tot_dsds = 0;
2091
2092         /* Send marker if required */
2093         if (vha->marker_needed != 0) {
2094                 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2095                     QLA_SUCCESS) {
2096                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2097                         return QLA_FUNCTION_FAILED;
2098                 }
2099                 vha->marker_needed = 0;
2100         }
2101
2102         handle = qla2xxx_get_next_handle(req);
2103         if (handle == 0)
2104                 goto queuing_error;
2105
2106         /* Compute number of required data segments */
2107         /* Map the sg table so we have an accurate count of sg entries needed */
2108         if (scsi_sg_count(cmd)) {
2109                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2110                     scsi_sg_count(cmd), cmd->sc_data_direction);
2111                 if (unlikely(!nseg))
2112                         goto queuing_error;
2113                 else
2114                         sp->flags |= SRB_DMA_VALID;
2115
2116                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2117                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2118                         struct qla2_sgx sgx;
2119                         uint32_t        partial;
2120
2121                         memset(&sgx, 0, sizeof(struct qla2_sgx));
2122                         sgx.tot_bytes = scsi_bufflen(cmd);
2123                         sgx.cur_sg = scsi_sglist(cmd);
2124                         sgx.sp = sp;
2125
2126                         nseg = 0;
2127                         while (qla24xx_get_one_block_sg(
2128                             cmd->device->sector_size, &sgx, &partial))
2129                                 nseg++;
2130                 }
2131         } else
2132                 nseg = 0;
2133
2134         /* number of required data segments */
2135         tot_dsds = nseg;
2136
2137         /* Compute number of required protection segments */
2138         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2139                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2140                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2141                 if (unlikely(!nseg))
2142                         goto queuing_error;
2143                 else
2144                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2145
2146                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2147                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2148                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2149                 }
2150         } else {
2151                 nseg = 0;
2152         }
2153
2154         req_cnt = 1;
2155         /* Total Data and protection sg segment(s) */
2156         tot_prot_dsds = nseg;
2157         tot_dsds += nseg;
2158         if (req->cnt < (req_cnt + 2)) {
2159                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2160                     RD_REG_DWORD_RELAXED(req->req_q_out);
2161                 if (req->ring_index < cnt)
2162                         req->cnt = cnt - req->ring_index;
2163                 else
2164                         req->cnt = req->length -
2165                                 (req->ring_index - cnt);
2166                 if (req->cnt < (req_cnt + 2))
2167                         goto queuing_error;
2168         }
2169
2170         status |= QDSS_GOT_Q_SPACE;
2171
2172         /* Build header part of command packet (excluding the OPCODE). */
2173         req->current_outstanding_cmd = handle;
2174         req->outstanding_cmds[handle] = sp;
2175         sp->handle = handle;
2176         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2177         req->cnt -= req_cnt;
2178
2179         /* Fill-in common area */
2180         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2181         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2182
2183         clr_ptr = (uint32_t *)cmd_pkt + 2;
2184         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2185
2186         /* Set NPORT-ID and LUN number*/
2187         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2188         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2189         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2190         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2191
2192         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2193         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2194
2195         /* Total Data and protection segment(s) */
2196         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2197
2198         /* Build IOCB segments and adjust for data protection segments */
2199         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2200             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2201                 QLA_SUCCESS)
2202                 goto queuing_error;
2203
2204         cmd_pkt->entry_count = (uint8_t)req_cnt;
2205         cmd_pkt->timeout = cpu_to_le16(0);
2206         wmb();
2207
2208         /* Adjust ring index. */
2209         req->ring_index++;
2210         if (req->ring_index == req->length) {
2211                 req->ring_index = 0;
2212                 req->ring_ptr = req->ring;
2213         } else
2214                 req->ring_ptr++;
2215
2216         /* Set chip new ring index. */
2217         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2218
2219         /* Manage unprocessed RIO/ZIO commands in response queue. */
2220         if (vha->flags.process_response_queue &&
2221             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2222                 qla24xx_process_response_queue(vha, rsp);
2223
2224         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2225
2226         return QLA_SUCCESS;
2227
2228 queuing_error:
2229         if (status & QDSS_GOT_Q_SPACE) {
2230                 req->outstanding_cmds[handle] = NULL;
2231                 req->cnt += req_cnt;
2232         }
2233         /* Cleanup will be performed by the caller (queuecommand) */
2234
2235         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2236         return QLA_FUNCTION_FAILED;
2237 }
2238
2239 /* Generic Control-SRB manipulation functions. */
2240
2241 /* hardware_lock assumed to be held. */
2242
2243 void *
2244 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2245 {
2246         scsi_qla_host_t *vha = qpair->vha;
2247         struct qla_hw_data *ha = vha->hw;
2248         struct req_que *req = qpair->req;
2249         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2250         uint32_t handle;
2251         request_t *pkt;
2252         uint16_t cnt, req_cnt;
2253
2254         pkt = NULL;
2255         req_cnt = 1;
2256         handle = 0;
2257
2258         if (sp && (sp->type != SRB_SCSI_CMD)) {
2259                 /* Adjust entry-counts as needed. */
2260                 req_cnt = sp->iocbs;
2261         }
2262
2263         /* Check for room on request queue. */
2264         if (req->cnt < req_cnt + 2) {
2265                 if (qpair->use_shadow_reg)
2266                         cnt = *req->out_ptr;
2267                 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2268                     IS_QLA28XX(ha))
2269                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2270                 else if (IS_P3P_TYPE(ha))
2271                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2272                 else if (IS_FWI2_CAPABLE(ha))
2273                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2274                 else if (IS_QLAFX00(ha))
2275                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2276                 else
2277                         cnt = qla2x00_debounce_register(
2278                             ISP_REQ_Q_OUT(ha, &reg->isp));
2279
2280                 if (req->ring_index < cnt)
2281                         req->cnt = cnt - req->ring_index;
2282                 else
2283                         req->cnt = req->length -
2284                             (req->ring_index - cnt);
2285         }
2286         if (req->cnt < req_cnt + 2)
2287                 goto queuing_error;
2288
2289         if (sp) {
2290                 handle = qla2xxx_get_next_handle(req);
2291                 if (handle == 0) {
2292                         ql_log(ql_log_warn, vha, 0x700b,
2293                             "No room on outstanding cmd array.\n");
2294                         goto queuing_error;
2295                 }
2296
2297                 /* Prep command array. */
2298                 req->current_outstanding_cmd = handle;
2299                 req->outstanding_cmds[handle] = sp;
2300                 sp->handle = handle;
2301         }
2302
2303         /* Prep packet */
2304         req->cnt -= req_cnt;
2305         pkt = req->ring_ptr;
2306         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2307         if (IS_QLAFX00(ha)) {
2308                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2309                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2310         } else {
2311                 pkt->entry_count = req_cnt;
2312                 pkt->handle = handle;
2313         }
2314
2315         return pkt;
2316
2317 queuing_error:
2318         qpair->tgt_counters.num_alloc_iocb_failed++;
2319         return pkt;
2320 }
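
/*
 * Sketch of the typical caller pattern (hypothetical, for illustration;
 * the real users follow below and elsewhere in the driver).  The ring
 * lock must already be held, and the caller rings the doorbell once the
 * IOCB body is filled in:
 *
 *      spin_lock_irqsave(&qpair->qp_lock, flags);
 *      pkt = __qla2x00_alloc_iocbs(qpair, sp);
 *      if (pkt) {
 *              ... fill IOCB-type specific fields ...
 *              wmb();
 *              qla2x00_start_iocbs(vha, qpair->req);
 *      }
 *      spin_unlock_irqrestore(&qpair->qp_lock, flags);
 */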
2321
2322 void *
2323 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2324 {
2325         scsi_qla_host_t *vha = qpair->vha;
2326
2327         if (qla2x00_reset_active(vha))
2328                 return NULL;
2329
2330         return __qla2x00_alloc_iocbs(qpair, sp);
2331 }
2332
2333 void *
2334 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2335 {
2336         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2337 }
2338
2339 static void
2340 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2341 {
2342         struct srb_iocb *lio = &sp->u.iocb_cmd;
2343
2344         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2345         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2346         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2347                 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2348                 if (sp->vha->flags.nvme_first_burst)
2349                         logio->io_parameter[0] = cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2350         }
2351
2352         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2353         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2354         logio->port_id[1] = sp->fcport->d_id.b.area;
2355         logio->port_id[2] = sp->fcport->d_id.b.domain;
2356         logio->vp_index = sp->vha->vp_idx;
2357 }
2358
2359 static void
2360 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2361 {
2362         struct srb_iocb *lio = &sp->u.iocb_cmd;
2363
2364         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2365         if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2366                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2367         } else {
2368                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2369                 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2370                         logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2371                 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2372                         logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2373         }
2374         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2375         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2376         logio->port_id[1] = sp->fcport->d_id.b.area;
2377         logio->port_id[2] = sp->fcport->d_id.b.domain;
2378         logio->vp_index = sp->vha->vp_idx;
2379 }
2380
2381 static void
2382 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2383 {
2384         struct qla_hw_data *ha = sp->vha->hw;
2385         struct srb_iocb *lio = &sp->u.iocb_cmd;
2386         uint16_t opts;
2387
2388         mbx->entry_type = MBX_IOCB_TYPE;
2389         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2390         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2391         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2392         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2393         if (HAS_EXTENDED_IDS(ha)) {
2394                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2395                 mbx->mb10 = cpu_to_le16(opts);
2396         } else {
2397                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2398         }
2399         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2400         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2401             sp->fcport->d_id.b.al_pa);
2402         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2403 }
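
/*
 * Worked example (illustrative): without extended IDs the loop ID and the
 * option bits share mb1 -- loop ID in the high byte, options in the low
 * byte.  A conditional PLOGI (opts = BIT_0) to loop ID 0x7f therefore
 * yields mb1 = cpu_to_le16(0x7f01).
 */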
2404
2405 static void
2406 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2407 {
2408         u16 control_flags = LCF_COMMAND_LOGO;
2409         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2410
2411         if (sp->fcport->explicit_logout) {
2412                 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2413         } else {
2414                 control_flags |= LCF_IMPL_LOGO;
2415
2416                 if (!sp->fcport->keep_nport_handle)
2417                         control_flags |= LCF_FREE_NPORT;
2418         }
2419
2420         logio->control_flags = cpu_to_le16(control_flags);
2421         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2422         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2423         logio->port_id[1] = sp->fcport->d_id.b.area;
2424         logio->port_id[2] = sp->fcport->d_id.b.domain;
2425         logio->vp_index = sp->vha->vp_idx;
2426 }
2427
2428 static void
2429 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2430 {
2431         struct qla_hw_data *ha = sp->vha->hw;
2432
2433         mbx->entry_type = MBX_IOCB_TYPE;
2434         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2435         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2436         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2437             cpu_to_le16(sp->fcport->loop_id) :
2438             cpu_to_le16(sp->fcport->loop_id << 8);
2439         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2440         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2441             sp->fcport->d_id.b.al_pa);
2442         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2443         /* Implicit: mbx->mb10 = 0. */
2444 }
2445
2446 static void
2447 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2448 {
2449         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2450         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2451         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2452         logio->vp_index = sp->vha->vp_idx;
2453 }
2454
2455 static void
2456 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2457 {
2458         struct qla_hw_data *ha = sp->vha->hw;
2459
2460         mbx->entry_type = MBX_IOCB_TYPE;
2461         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2462         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2463         if (HAS_EXTENDED_IDS(ha)) {
2464                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2465                 mbx->mb10 = cpu_to_le16(BIT_0);
2466         } else {
2467                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2468         }
2469         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2470         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2471         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2472         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2473         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2474 }
2475
2476 static void
2477 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2478 {
2479         uint32_t flags;
2480         uint64_t lun;
2481         struct fc_port *fcport = sp->fcport;
2482         scsi_qla_host_t *vha = fcport->vha;
2483         struct qla_hw_data *ha = vha->hw;
2484         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2485         struct req_que *req = vha->req;
2486
2487         flags = iocb->u.tmf.flags;
2488         lun = iocb->u.tmf.lun;
2489
2490         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2491         tsk->entry_count = 1;
2492         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2493         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2494         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2495         tsk->control_flags = cpu_to_le32(flags);
2496         tsk->port_id[0] = fcport->d_id.b.al_pa;
2497         tsk->port_id[1] = fcport->d_id.b.area;
2498         tsk->port_id[2] = fcport->d_id.b.domain;
2499         tsk->vp_index = fcport->vha->vp_idx;
2500
2501         if (flags == TCF_LUN_RESET) {
2502                 int_to_scsilun(lun, &tsk->lun);
2503                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2504                         sizeof(tsk->lun));
2505         }
2506 }
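
/*
 * Note (assumption): ha->r_a_tov is held in 100 ms units elsewhere in the
 * driver, so `r_a_tov / 10 * 2` programs the task-management timeout as
 * twice R_A_TOV in seconds -- the customary FC wait before an exchange
 * may be reclaimed.
 */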
2507
2508 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2509 {
2510         timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2511         sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2512         sp->free = qla2x00_sp_free;
2513         if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2514                 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2515         sp->start_timer = 1;
2516 }
2517
2518 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2519 {
2520         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2521
2522         kfree(sp->fcport);
2523
2524         if (elsio->u.els_logo.els_logo_pyld)
2525                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2526                     elsio->u.els_logo.els_logo_pyld,
2527                     elsio->u.els_logo.els_logo_pyld_dma);
2528
2529         del_timer(&elsio->timer);
2530         qla2x00_rel_sp(sp);
2531 }
2532
2533 static void
2534 qla2x00_els_dcmd_iocb_timeout(void *data)
2535 {
2536         srb_t *sp = data;
2537         fc_port_t *fcport = sp->fcport;
2538         struct scsi_qla_host *vha = sp->vha;
2539         struct srb_iocb *lio = &sp->u.iocb_cmd;
2540
2541         ql_dbg(ql_dbg_io, vha, 0x3069,
2542             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2543             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2544             fcport->d_id.b.al_pa);
2545
2546         complete(&lio->u.els_logo.comp);
2547 }
2548
2549 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2550 {
2551         fc_port_t *fcport = sp->fcport;
2552         struct srb_iocb *lio = &sp->u.iocb_cmd;
2553         struct scsi_qla_host *vha = sp->vha;
2554
2555         ql_dbg(ql_dbg_io, vha, 0x3072,
2556             "%s hdl=%x, portid=%02x%02x%02x done\n",
2557             sp->name, sp->handle, fcport->d_id.b.domain,
2558             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2559
2560         complete(&lio->u.els_logo.comp);
2561 }
2562
2563 int
2564 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2565     port_id_t remote_did)
2566 {
2567         srb_t *sp;
2568         fc_port_t *fcport = NULL;
2569         struct srb_iocb *elsio = NULL;
2570         struct qla_hw_data *ha = vha->hw;
2571         struct els_logo_payload logo_pyld;
2572         int rval = QLA_SUCCESS;
2573
2574         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2575         if (!fcport) {
2576                 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2577                 return -ENOMEM;
2578         }
2579
2580         /* Alloc SRB structure */
2581         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2582         if (!sp) {
2583                 kfree(fcport);
2584                 ql_log(ql_log_info, vha, 0x70e6,
2585                     "SRB allocation failed\n");
2586                 return -ENOMEM;
2587         }
2588
2589         elsio = &sp->u.iocb_cmd;
2590         fcport->loop_id = 0xFFFF;
2591         fcport->d_id.b.domain = remote_did.b.domain;
2592         fcport->d_id.b.area = remote_did.b.area;
2593         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2594
2595         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2596             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2597
2598         sp->type = SRB_ELS_DCMD;
2599         sp->name = "ELS_DCMD";
2600         sp->fcport = fcport;
2601         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2602         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2603         init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2604         sp->done = qla2x00_els_dcmd_sp_done;
2605         sp->free = qla2x00_els_dcmd_sp_free;
2606
2607         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2608                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2609                             GFP_KERNEL);
2610
2611         if (!elsio->u.els_logo.els_logo_pyld) {
2612                 sp->free(sp);
2613                 return QLA_FUNCTION_FAILED;
2614         }
2615
2616         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2617
2618         elsio->u.els_logo.els_cmd = els_opcode;
2619         logo_pyld.opcode = els_opcode;
2620         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2621         logo_pyld.s_id[1] = vha->d_id.b.area;
2622         logo_pyld.s_id[2] = vha->d_id.b.domain;
2623         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2624         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2625
2626         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2627             sizeof(struct els_logo_payload));
2628         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2629         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2630                        elsio->u.els_logo.els_logo_pyld,
2631                        sizeof(*elsio->u.els_logo.els_logo_pyld));
2632
2633         rval = qla2x00_start_sp(sp);
2634         if (rval != QLA_SUCCESS) {
2635                 sp->free(sp);
2636                 return QLA_FUNCTION_FAILED;
2637         }
2638
2639         ql_dbg(ql_dbg_io, vha, 0x3074,
2640             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2641             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2642             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2643
2644         wait_for_completion(&elsio->u.els_logo.comp);
2645
2646         sp->free(sp);
2647         return rval;
2648 }
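
/*
 * Annotation: qla24xx_els_dcmd_iocb() is synchronous -- it fabricates a
 * temporary fc_port for the remote D_ID, starts the ELS through
 * qla2x00_start_sp(), and then blocks on els_logo.comp, which is
 * completed from either the done or the timeout callback above (hedged
 * summary of the code in this block).
 */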
2649
2650 static void
2651 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2652 {
2653         scsi_qla_host_t *vha = sp->vha;
2654         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2655
2656         els_iocb->entry_type = ELS_IOCB_TYPE;
2657         els_iocb->entry_count = 1;
2658         els_iocb->sys_define = 0;
2659         els_iocb->entry_status = 0;
2660         els_iocb->handle = sp->handle;
2661         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2662         els_iocb->tx_dsd_count = cpu_to_le16(1);
2663         els_iocb->vp_index = vha->vp_idx;
2664         els_iocb->sof_type = EST_SOFI3;
2665         els_iocb->rx_dsd_count = 0;
2666         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2667
2668         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2669         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2670         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2671         /* For S_ID the byte order differs from that of D_ID */
2672         els_iocb->s_id[1] = vha->d_id.b.al_pa;
2673         els_iocb->s_id[2] = vha->d_id.b.area;
2674         els_iocb->s_id[0] = vha->d_id.b.domain;
2675
2676         if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2677                 els_iocb->control_flags = 0;
2678                 els_iocb->tx_byte_count = els_iocb->tx_len =
2679                         cpu_to_le32(sizeof(struct els_plogi_payload));
2680                 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2681                                    &els_iocb->tx_address);
2682                 els_iocb->rx_dsd_count = cpu_to_le16(1);
2683                 els_iocb->rx_byte_count = els_iocb->rx_len =
2684                         cpu_to_le32(sizeof(struct els_plogi_payload));
2685                 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2686                                    &els_iocb->rx_address);
2687
2688                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2689                     "PLOGI ELS IOCB:\n");
2690                 ql_dump_buffer(ql_log_info, vha, 0x0109,
2691                     (uint8_t *)els_iocb,
2692                     sizeof(*els_iocb));
2693         } else {
2694                 els_iocb->control_flags = cpu_to_le16(1 << 13);
2695                 els_iocb->tx_byte_count =
2696                         cpu_to_le32(sizeof(struct els_logo_payload));
2697                 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2698                                    &els_iocb->tx_address);
2699                 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2700
2701                 els_iocb->rx_byte_count = 0;
2702                 els_iocb->rx_address = 0;
2703                 els_iocb->rx_len = 0;
2704                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2705                        "LOGO ELS IOCB:");
2706                 ql_dump_buffer(ql_log_info, vha, 0x010b,
2707                                els_iocb,
2708                                sizeof(*els_iocb));
2709         }
2710
2711         sp->vha->qla_stats.control_requests++;
2712 }
2713
2714 static void
2715 qla2x00_els_dcmd2_iocb_timeout(void *data)
2716 {
2717         srb_t *sp = data;
2718         fc_port_t *fcport = sp->fcport;
2719         struct scsi_qla_host *vha = sp->vha;
2720         struct qla_hw_data *ha = vha->hw;
2721         unsigned long flags = 0;
2722         int res;
2723
2724         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2725             "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2726             sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2727
2728         /* Abort the exchange */
2729         spin_lock_irqsave(&ha->hardware_lock, flags);
2730         res = ha->isp_ops->abort_command(sp);
2731         ql_dbg(ql_dbg_io, vha, 0x3070,
2732             "mbx abort_command %s\n",
2733             (res == QLA_SUCCESS) ? "successful" : "failed");
2734         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2735
2736         sp->done(sp, QLA_FUNCTION_TIMEOUT);
2737 }
2738
2739 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2740 {
2741         if (els_plogi->els_plogi_pyld)
2742                 dma_free_coherent(&vha->hw->pdev->dev,
2743                                   els_plogi->tx_size,
2744                                   els_plogi->els_plogi_pyld,
2745                                   els_plogi->els_plogi_pyld_dma);
2746
2747         if (els_plogi->els_resp_pyld)
2748                 dma_free_coherent(&vha->hw->pdev->dev,
2749                                   els_plogi->rx_size,
2750                                   els_plogi->els_resp_pyld,
2751                                   els_plogi->els_resp_pyld_dma);
2752 }
2753
2754 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2755 {
2756         fc_port_t *fcport = sp->fcport;
2757         struct srb_iocb *lio = &sp->u.iocb_cmd;
2758         struct scsi_qla_host *vha = sp->vha;
2759         struct event_arg ea;
2760         struct qla_work_evt *e;
2761         struct fc_port *conflict_fcport;
2762         port_id_t cid;  /* conflict Nport id */
2763         u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2764         u16 lid;
2765
2766         ql_dbg(ql_dbg_disc, vha, 0x3072,
2767             "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2768             sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2769
2770         fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2771         del_timer(&sp->u.iocb_cmd.timer);
2772
2773         if (sp->flags & SRB_WAKEUP_ON_COMP)
2774                 complete(&lio->u.els_plogi.comp);
2775         else {
2776                 switch (fw_status[0]) {
2777                 case CS_DATA_UNDERRUN:
2778                 case CS_COMPLETE:
2779                         memset(&ea, 0, sizeof(ea));
2780                         ea.fcport = fcport;
2781                         ea.rc = res;
2782                         qla_handle_els_plogi_done(vha, &ea);
2783                         break;
2784
2785                 case CS_IOCB_ERROR:
2786                         switch (fw_status[1]) {
2787                         case LSC_SCODE_PORTID_USED:
2788                                 lid = fw_status[2] & 0xffff;
2789                                 qlt_find_sess_invalidate_other(vha,
2790                                     wwn_to_u64(fcport->port_name),
2791                                     fcport->d_id, lid, &conflict_fcport);
2792                                 if (conflict_fcport) {
2793                                         /*
2794                                          * Another fcport shares the same
2795                                          * loop_id & nport id; conflict
2796                                          * fcport needs to finish cleanup
2797                                          * before this fcport can proceed
2798                                          * to login.
2799                                          */
2800                                         conflict_fcport->conflict = fcport;
2801                                         fcport->login_pause = 1;
2802                                         ql_dbg(ql_dbg_disc, vha, 0x20ed,
2803                                             "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2804                                             __func__, __LINE__,
2805                                             fcport->port_name,
2806                                             fcport->d_id.b24, lid);
2807                                 } else {
2808                                         ql_dbg(ql_dbg_disc, vha, 0x20ed,
2809                                             "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2810                                             __func__, __LINE__,
2811                                             fcport->port_name,
2812                                             fcport->d_id.b24, lid);
2813                                         qla2x00_clear_loop_id(fcport);
2814                                         set_bit(lid, vha->hw->loop_id_map);
2815                                         fcport->loop_id = lid;
2816                                         fcport->keep_nport_handle = 0;
2817                                         qlt_schedule_sess_for_deletion(fcport);
2818                                 }
2819                                 break;
2820
2821                         case LSC_SCODE_NPORT_USED:
2822                                 cid.b.domain = (fw_status[2] >> 16) & 0xff;
2823                                 cid.b.area   = (fw_status[2] >>  8) & 0xff;
2824                                 cid.b.al_pa  = fw_status[2] & 0xff;
2825                                 cid.b.rsvd_1 = 0;
2826
2827                                 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2828                                     "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2829                                     __func__, __LINE__, fcport->port_name,
2830                                     fcport->loop_id, cid.b24);
2831                                 set_bit(fcport->loop_id,
2832                                     vha->hw->loop_id_map);
2833                                 fcport->loop_id = FC_NO_LOOP_ID;
2834                                 qla24xx_post_gnl_work(vha, fcport);
2835                                 break;
2836
2837                         case LSC_SCODE_NOXCB:
2838                                 vha->hw->exch_starvation++;
2839                                 if (vha->hw->exch_starvation > 5) {
2840                                         ql_log(ql_log_warn, vha, 0xd046,
2841                                             "Exchange starvation. Resetting RISC\n");
2842                                         vha->hw->exch_starvation = 0;
2843                                         set_bit(ISP_ABORT_NEEDED,
2844                                             &vha->dpc_flags);
2845                                         qla2xxx_wake_dpc(vha);
2846                                 }
2847                                 /* fall through */
2848                         default:
2849                                 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2850                                     "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2851                                     __func__, sp->fcport->port_name,
2852                                     fw_status[0], fw_status[1], fw_status[2]);
2853
2854                                 fcport->flags &= ~FCF_ASYNC_SENT;
2855                                 fcport->disc_state = DSC_LOGIN_FAILED;
2856                                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2857                                 break;
2858                         }
2859                         break;
2860
2861                 default:
2862                         ql_dbg(ql_dbg_disc, vha, 0x20eb,
2863                             "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2864                             __func__, sp->fcport->port_name,
2865                             fw_status[0], fw_status[1], fw_status[2]);
2866
2867                         sp->fcport->flags &= ~FCF_ASYNC_SENT;
2868                         sp->fcport->disc_state = DSC_LOGIN_FAILED;
2869                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2870                         break;
2871                 }
2872
2873                 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2874                 if (!e) {
2875                         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2876
2877                         qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2878                         sp->free(sp);
2879                         return;
2880                 }
2881                 e->u.iosb.sp = sp;
2882                 qla2x00_post_work(vha, e);
2883         }
2884 }
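
/*
 * Worked example of the firmware-status decoding above (illustrative
 * values only): for LSC_SCODE_PORTID_USED, fw_status[2] = 0x00001234
 * yields lid = 0x1234; for LSC_SCODE_NPORT_USED, fw_status[2] =
 * 0x00010203 decodes to domain 0x01, area 0x02, al_pa 0x03, i.e. a
 * conflicting N_Port ID of 0x010203.
 */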
2885
2886 int
2887 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2888     fc_port_t *fcport, bool wait)
2889 {
2890         srb_t *sp;
2891         struct srb_iocb *elsio = NULL;
2892         struct qla_hw_data *ha = vha->hw;
2893         int rval = QLA_SUCCESS;
2894         void    *ptr, *resp_ptr;
2895
2896         /* Alloc SRB structure */
2897         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2898         if (!sp) {
2899                 ql_log(ql_log_info, vha, 0x70e6,
2900                     "SRB allocation failed\n");
2901                 return -ENOMEM;
2902         }
2903
2904         fcport->flags |= FCF_ASYNC_SENT;
2905         fcport->disc_state = DSC_LOGIN_PEND;
2906         elsio = &sp->u.iocb_cmd;
2907         ql_dbg(ql_dbg_io, vha, 0x3073,
2908             "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2909
2910         sp->type = SRB_ELS_DCMD;
2911         sp->name = "ELS_DCMD";
2912         sp->fcport = fcport;
2913
2914         elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2915         init_completion(&elsio->u.els_plogi.comp);
2916         if (wait)
2917                 sp->flags = SRB_WAKEUP_ON_COMP;
2918
2919         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2920
2921         sp->done = qla2x00_els_dcmd2_sp_done;
2922         elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2923
2924         ptr = elsio->u.els_plogi.els_plogi_pyld =
2925             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2926                 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2927
2928         if (!elsio->u.els_plogi.els_plogi_pyld) {
2929                 rval = QLA_FUNCTION_FAILED;
2930                 goto out;
2931         }
2932
2933         resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2934             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2935                 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2936
2937         if (!elsio->u.els_plogi.els_resp_pyld) {
2938                 rval = QLA_FUNCTION_FAILED;
2939                 goto out;
2940         }
2941
2942         ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2943
2944         memset(ptr, 0, sizeof(struct els_plogi_payload));
2945         memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2946         memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2947             &ha->plogi_els_payld.data,
2948             sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2949
2950         elsio->u.els_plogi.els_cmd = els_opcode;
2951         elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2952
2953         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2954         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2955             (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
2956             sizeof(*elsio->u.els_plogi.els_plogi_pyld));
2957
2958         rval = qla2x00_start_sp(sp);
2959         if (rval != QLA_SUCCESS) {
2960                 rval = QLA_FUNCTION_FAILED;
2961         } else {
2962                 ql_dbg(ql_dbg_disc, vha, 0x3074,
2963                     "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2964                     sp->name, sp->handle, fcport->loop_id,
2965                     fcport->d_id.b24, vha->d_id.b24);
2966         }
2967
2968         if (wait) {
2969                 wait_for_completion(&elsio->u.els_plogi.comp);
2970
2971                 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2972                         rval = QLA_FUNCTION_FAILED;
2973         } else {
2974                 goto done;
2975         }
2976
2977 out:
2978         fcport->flags &= ~(FCF_ASYNC_SENT);
2979         qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2980         sp->free(sp);
2981 done:
2982         return rval;
2983 }
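
/*
 * Minimal caller sketch (assumed context: ELS_DCMD_PLOGI is the
 * driver's PLOGI opcode and 0xffff is a placeholder debug code):
 * issue a synchronous ELS PLOGI and test the outcome.
 *
 *         if (qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, fcport, true) !=
 *             QLA_SUCCESS)
 *                 ql_log(ql_log_warn, vha, 0xffff,
 *                     "ELS PLOGI to %06x failed\n", fcport->d_id.b24);
 */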
2984
2985 static void
2986 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2987 {
2988         struct bsg_job *bsg_job = sp->u.bsg_job;
2989         struct fc_bsg_request *bsg_request = bsg_job->request;
2990
2991         els_iocb->entry_type = ELS_IOCB_TYPE;
2992         els_iocb->entry_count = 1;
2993         els_iocb->sys_define = 0;
2994         els_iocb->entry_status = 0;
2995         els_iocb->handle = sp->handle;
2996         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2997         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2998         els_iocb->vp_index = sp->vha->vp_idx;
2999         els_iocb->sof_type = EST_SOFI3;
3000         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3001
3002         els_iocb->opcode =
3003             sp->type == SRB_ELS_CMD_RPT ?
3004             bsg_request->rqst_data.r_els.els_code :
3005             bsg_request->rqst_data.h_els.command_code;
3006         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3007         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
3008         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3009         els_iocb->control_flags = 0;
3010         els_iocb->rx_byte_count =
3011             cpu_to_le32(bsg_job->reply_payload.payload_len);
3012         els_iocb->tx_byte_count =
3013             cpu_to_le32(bsg_job->request_payload.payload_len);
3014
3015         put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3016                            &els_iocb->tx_address);
3017         els_iocb->tx_len = cpu_to_le32(sg_dma_len
3018             (bsg_job->request_payload.sg_list));
3019
3020         put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3021                            &els_iocb->rx_address);
3022         els_iocb->rx_len = cpu_to_le32(sg_dma_len
3023             (bsg_job->reply_payload.sg_list));
3024
3025         sp->vha->qla_stats.control_requests++;
3026 }
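
/*
 * Note on the address fields above: put_unaligned_le64() stores the
 * 64-bit DMA address little-endian regardless of host byte order.
 * Worked example (illustrative value): 0x0000001234567890 is laid out
 * in memory as the byte sequence 90 78 56 34 12 00 00 00.
 */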
3027
3028 static void
3029 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3030 {
3031         uint16_t        avail_dsds;
3032         struct dsd64    *cur_dsd;
3033         struct scatterlist *sg;
3034         int index;
3035         uint16_t tot_dsds;
3036         scsi_qla_host_t *vha = sp->vha;
3037         struct qla_hw_data *ha = vha->hw;
3038         struct bsg_job *bsg_job = sp->u.bsg_job;
3039         int entry_count = 1;
3040
3041         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3042         ct_iocb->entry_type = CT_IOCB_TYPE;
3043         ct_iocb->entry_status = 0;
3044         ct_iocb->handle1 = sp->handle;
3045         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3046         ct_iocb->status = cpu_to_le16(0);
3047         ct_iocb->control_flags = cpu_to_le16(0);
3048         ct_iocb->timeout = 0;
3049         ct_iocb->cmd_dsd_count =
3050             cpu_to_le16(bsg_job->request_payload.sg_cnt);
3051         ct_iocb->total_dsd_count =
3052             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3053         ct_iocb->req_bytecount =
3054             cpu_to_le32(bsg_job->request_payload.payload_len);
3055         ct_iocb->rsp_bytecount =
3056             cpu_to_le32(bsg_job->reply_payload.payload_len);
3057
3058         put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3059                            &ct_iocb->req_dsd.address);
3060         ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3061
3062         put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3063                            &ct_iocb->rsp_dsd.address);
3064         ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3065
3066         avail_dsds = 1;
3067         cur_dsd = &ct_iocb->rsp_dsd;
3068         index = 0;
3069         tot_dsds = bsg_job->reply_payload.sg_cnt;
3070
3071         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3072                 cont_a64_entry_t *cont_pkt;
3073
3074                 /* Allocate additional continuation packets? */
3075                 if (avail_dsds == 0) {
3076                         /*
3077                          * Five DSDs are available in the Cont.
3078                          * Type 1 IOCB.
3079                          */
3080                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3081                             vha->hw->req_q_map[0]);
3082                         cur_dsd = cont_pkt->dsd;
3083                         avail_dsds = 5;
3084                         entry_count++;
3085                 }
3086
3087                 append_dsd64(&cur_dsd, sg);
3088                 avail_dsds--;
3089         }
3090         ct_iocb->entry_count = entry_count;
3091
3092         sp->vha->qla_stats.control_requests++;
3093 }
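
/*
 * Worked example for the reply-DSD loop above (illustrative count):
 * with reply_payload.sg_cnt = 8, the first segment lands in the
 * embedded rsp_dsd (avail_dsds 1 -> 0) and the remaining 7 need
 * ceil(7 / 5) = 2 Continuation Type 1 IOCBs, so entry_count ends up
 * as 1 + 2 = 3.
 */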
3094
3095 static void
3096 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3097 {
3098         uint16_t        avail_dsds;
3099         struct dsd64    *cur_dsd;
3100         struct scatterlist *sg;
3101         int index;
3102         uint16_t cmd_dsds, rsp_dsds;
3103         scsi_qla_host_t *vha = sp->vha;
3104         struct qla_hw_data *ha = vha->hw;
3105         struct bsg_job *bsg_job = sp->u.bsg_job;
3106         int entry_count = 1;
3107         cont_a64_entry_t *cont_pkt = NULL;
3108
3109         ct_iocb->entry_type = CT_IOCB_TYPE;
3110         ct_iocb->entry_status = 0;
3111         ct_iocb->sys_define = 0;
3112         ct_iocb->handle = sp->handle;
3113
3114         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3115         ct_iocb->vp_index = sp->vha->vp_idx;
3116         ct_iocb->comp_status = cpu_to_le16(0);
3117
3118         cmd_dsds = bsg_job->request_payload.sg_cnt;
3119         rsp_dsds = bsg_job->reply_payload.sg_cnt;
3120
3121         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3122         ct_iocb->timeout = 0;
3123         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3124         ct_iocb->cmd_byte_count =
3125             cpu_to_le32(bsg_job->request_payload.payload_len);
3126
3127         avail_dsds = 2;
3128         cur_dsd = ct_iocb->dsd;
3129         index = 0;
3130
3131         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3132                 /* Allocate additional continuation packets? */
3133                 if (avail_dsds == 0) {
3134                         /*
3135                          * Five DSDs are available in the Cont.
3136                          * Type 1 IOCB.
3137                          */
3138                         cont_pkt = qla2x00_prep_cont_type1_iocb(
3139                             vha, ha->req_q_map[0]);
3140                         cur_dsd = cont_pkt->dsd;
3141                         avail_dsds = 5;
3142                         entry_count++;
3143                 }
3144
3145                 append_dsd64(&cur_dsd, sg);
3146                 avail_dsds--;
3147         }
3148
3149         index = 0;
3150
3151         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3152                 /* Allocate additional continuation packets? */
3153                 if (avail_dsds == 0) {
3154                         /*
3155                          * Five DSDs are available in the Cont.
3156                          * Type 1 IOCB.
3157                          */
3158                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3159                             ha->req_q_map[0]);
3160                         cur_dsd = cont_pkt->dsd;
3161                         avail_dsds = 5;
3162                         entry_count++;
3163                 }
3164
3165                 append_dsd64(&cur_dsd, sg);
3166                 avail_dsds--;
3167         }
3168         ct_iocb->entry_count = entry_count;
3169 }
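
/*
 * Worked example (illustrative counts): with cmd_dsds = 3 and
 * rsp_dsds = 4, the two embedded DSDs absorb the first two command
 * segments, the third forces one Continuation Type 1 IOCB (avail_dsds
 * becomes 5, then 4), and the four response segments fit in the
 * remaining slots, so entry_count ends up as 2. Note that avail_dsds
 * deliberately carries over from the command loop to the response loop.
 */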
3170
3171 /*
3172  * qla82xx_start_scsi() - Send a SCSI command to the ISP
3173  * @sp: command to send to the ISP
3174  *
3175  * Returns non-zero if a failure occurred, else zero.
3176  */
3177 int
3178 qla82xx_start_scsi(srb_t *sp)
3179 {
3180         int             nseg;
3181         unsigned long   flags;
3182         struct scsi_cmnd *cmd;
3183         uint32_t        *clr_ptr;
3184         uint32_t        handle;
3185         uint16_t        cnt;
3186         uint16_t        req_cnt;
3187         uint16_t        tot_dsds;
3188         struct device_reg_82xx __iomem *reg;
3189         uint32_t dbval;
3190         uint32_t *fcp_dl;
3191         uint8_t additional_cdb_len;
3192         struct ct6_dsd *ctx;
3193         struct scsi_qla_host *vha = sp->vha;
3194         struct qla_hw_data *ha = vha->hw;
3195         struct req_que *req = NULL;
3196         struct rsp_que *rsp = NULL;
3197
3198         /* Setup device pointers. */
3199         reg = &ha->iobase->isp82;
3200         cmd = GET_CMD_SP(sp);
3201         req = vha->req;
3202         rsp = ha->rsp_q_map[0];
3203
3204         /* So we know we haven't pci_map'ed anything yet */
3205         tot_dsds = 0;
3206
3207         dbval = 0x04 | (ha->portnum << 5);
3208
3209         /* Send marker if required */
3210         if (vha->marker_needed != 0) {
3211                 if (qla2x00_marker(vha, ha->base_qpair,
3212                         0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3213                         ql_log(ql_log_warn, vha, 0x300c,
3214                             "qla2x00_marker failed for cmd=%p.\n", cmd);
3215                         return QLA_FUNCTION_FAILED;
3216                 }
3217                 vha->marker_needed = 0;
3218         }
3219
3220         /* Acquire ring specific lock */
3221         spin_lock_irqsave(&ha->hardware_lock, flags);
3222
3223         handle = qla2xxx_get_next_handle(req);
3224         if (handle == 0)
3225                 goto queuing_error;
3226
3227         /* Map the sg table so we have an accurate count of sg entries needed */
3228         if (scsi_sg_count(cmd)) {
3229                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3230                     scsi_sg_count(cmd), cmd->sc_data_direction);
3231                 if (unlikely(!nseg))
3232                         goto queuing_error;
3233         } else
3234                 nseg = 0;
3235
3236         tot_dsds = nseg;
3237
3238         if (tot_dsds > ql2xshiftctondsd) {
3239                 struct cmd_type_6 *cmd_pkt;
3240                 uint16_t more_dsd_lists = 0;
3241                 struct dsd_dma *dsd_ptr;
3242                 uint16_t i;
3243
3244                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3245                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3246                         ql_dbg(ql_dbg_io, vha, 0x300d,
3247                             "Num of DSD list %d is more than %d for cmd=%p.\n",
3248                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3249                             cmd);
3250                         goto queuing_error;
3251                 }
3252
3253                 if (more_dsd_lists <= ha->gbl_dsd_avail)
3254                         goto sufficient_dsds;
3255                 else
3256                         more_dsd_lists -= ha->gbl_dsd_avail;
3257
3258                 for (i = 0; i < more_dsd_lists; i++) {
3259                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3260                         if (!dsd_ptr) {
3261                                 ql_log(ql_log_fatal, vha, 0x300e,
3262                                     "Failed to allocate memory for dsd_dma "
3263                                     "for cmd=%p.\n", cmd);
3264                                 goto queuing_error;
3265                         }
3266
3267                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3268                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3269                         if (!dsd_ptr->dsd_addr) {
3270                                 kfree(dsd_ptr);
3271                                 ql_log(ql_log_fatal, vha, 0x300f,
3272                                     "Failed to allocate memory for dsd_addr "
3273                                     "for cmd=%p.\n", cmd);
3274                                 goto queuing_error;
3275                         }
3276                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3277                         ha->gbl_dsd_avail++;
3278                 }
3279
3280 sufficient_dsds:
3281                 req_cnt = 1;
3282
3283                 if (req->cnt < (req_cnt + 2)) {
3284                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3285                                 &reg->req_q_out[0]);
3286                         if (req->ring_index < cnt)
3287                                 req->cnt = cnt - req->ring_index;
3288                         else
3289                                 req->cnt = req->length -
3290                                         (req->ring_index - cnt);
3291                         if (req->cnt < (req_cnt + 2))
3292                                 goto queuing_error;
3293                 }
3294
3295                 ctx = sp->u.scmd.ct6_ctx =
3296                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3297                 if (!ctx) {
3298                         ql_log(ql_log_fatal, vha, 0x3010,
3299                             "Failed to allocate ctx for cmd=%p.\n", cmd);
3300                         goto queuing_error;
3301                 }
3302
3303                 memset(ctx, 0, sizeof(struct ct6_dsd));
3304                 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3305                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3306                 if (!ctx->fcp_cmnd) {
3307                         ql_log(ql_log_fatal, vha, 0x3011,
3308                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3309                         goto queuing_error;
3310                 }
3311
3312                 /* Initialize the DSD list and dma handle */
3313                 INIT_LIST_HEAD(&ctx->dsd_list);
3314                 ctx->dsd_use_cnt = 0;
3315
3316                 if (cmd->cmd_len > 16) {
3317                         additional_cdb_len = cmd->cmd_len - 16;
3318                         if ((cmd->cmd_len % 4) != 0) {
3319                                 /* A SCSI command bigger than 16 bytes
3320                                  * must be a multiple of 4.
3321                                  */
3322                                 ql_log(ql_log_warn, vha, 0x3012,
3323                                     "scsi cmd len %d not multiple of 4 "
3324                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3325                                 goto queuing_error_fcp_cmnd;
3326                         }
3327                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3328                 } else {
3329                         additional_cdb_len = 0;
3330                         ctx->fcp_cmnd_len = 12 + 16 + 4;
3331                 }
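
                /*
                 * Worked example (illustrative length): a 32-byte CDB
                 * gives additional_cdb_len = 16 and fcp_cmnd_len =
                 * 12 + 32 + 4 = 48: the 12-byte fixed FCP_CMND header,
                 * the CDB itself, and the trailing 4-byte fcp_dl field.
                 */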
3332
3333                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3334                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3335
3336                 /* Zero out remaining portion of packet. */
3337                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3338                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3339                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3340                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3341
3342                 /* Set NPORT-ID and LUN number*/
3343                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3344                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3345                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3346                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3347                 cmd_pkt->vp_index = sp->vha->vp_idx;
3348
3349                 /* Build IOCB segments */
3350                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3351                         goto queuing_error_fcp_cmnd;
3352
3353                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3354                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3355
3356                 /* build FCP_CMND IU */
3357                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3358                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3359
3360                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3361                         ctx->fcp_cmnd->additional_cdb_len |= 1;
3362                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3363                         ctx->fcp_cmnd->additional_cdb_len |= 2;
3364
3365                 /* Populate the FCP_PRIO. */
3366                 if (ha->flags.fcp_prio_enabled)
3367                         ctx->fcp_cmnd->task_attribute |=
3368                             sp->fcport->fcp_prio << 3;
3369
3370                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3371
3372                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3373                     additional_cdb_len);
3374                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
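
                /*
                 * fcp_dl sits immediately after the (possibly extended)
                 * CDB and, per FCP, is big-endian; hence htonl() rather
                 * than the cpu_to_le32() used for IOCB fields.
                 */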
3375
3376                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3377                 put_unaligned_le64(ctx->fcp_cmnd_dma,
3378                                    &cmd_pkt->fcp_cmnd_dseg_address);
3379
3380                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3381                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3382                 /* Set total data segment count. */
3383                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3384                 /* Specify response queue number where
3385                  * completion should happen.
3386                  */
3387                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3388         } else {
3389                 struct cmd_type_7 *cmd_pkt;
3390
3391                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3392                 if (req->cnt < (req_cnt + 2)) {
3393                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3394                             &reg->req_q_out[0]);
3395                         if (req->ring_index < cnt)
3396                                 req->cnt = cnt - req->ring_index;
3397                         else
3398                                 req->cnt = req->length -
3399                                         (req->ring_index - cnt);
3400                 }
3401                 if (req->cnt < (req_cnt + 2))
3402                         goto queuing_error;
3403
3404                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3405                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3406
3407                 /* Zero out remaining portion of packet. */
3408                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3409                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3410                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3411                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3412
3413                 /* Set NPORT-ID and LUN number*/
3414                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3415                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3416                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3417                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3418                 cmd_pkt->vp_index = sp->vha->vp_idx;
3419
3420                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3421                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3422                     sizeof(cmd_pkt->lun));
3423
3424                 /* Populate the FCP_PRIO. */
3425                 if (ha->flags.fcp_prio_enabled)
3426                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3427
3428                 /* Load SCSI command packet. */
3429                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3430                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3431
3432                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3433
3434                 /* Build IOCB segments */
3435                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3436
3437                 /* Set total data segment count. */
3438                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3439                 /* Specify response queue number where
3440                  * completion should happen.
3441                  */
3442                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3443
3444         }
3445         /* Build command packet. */
3446         req->current_outstanding_cmd = handle;
3447         req->outstanding_cmds[handle] = sp;
3448         sp->handle = handle;
3449         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3450         req->cnt -= req_cnt;
3451         wmb();
3452
3453         /* Adjust ring index. */
3454         req->ring_index++;
3455         if (req->ring_index == req->length) {
3456                 req->ring_index = 0;
3457                 req->ring_ptr = req->ring;
3458         } else
3459                 req->ring_ptr++;
3460
3461         sp->flags |= SRB_DMA_VALID;
3462
3463         /* Set chip new ring index. */
3464         /* write, read and verify logic */
3465         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3466         if (ql2xdbwr)
3467                 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3468         else {
3469                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3470                 wmb();
3471                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3472                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3473                         wmb();
3474                 }
3475         }
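
        /*
         * Worked example of the doorbell value built above (illustrative
         * values): portnum = 1, req->id = 2, ring_index = 0x10 gives
         * dbval = 0x04 | (1 << 5) | (2 << 8) | (0x10 << 16) = 0x100224.
         */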
3476
3477         /* Manage unprocessed RIO/ZIO commands in response queue. */
3478         if (vha->flags.process_response_queue &&
3479             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3480                 qla24xx_process_response_queue(vha, rsp);
3481
3482         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3483         return QLA_SUCCESS;
3484
3485 queuing_error_fcp_cmnd:
3486         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3487 queuing_error:
3488         if (tot_dsds)
3489                 scsi_dma_unmap(cmd);
3490
3491         if (sp->u.scmd.crc_ctx) {
3492                 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3493                 sp->u.scmd.crc_ctx = NULL;
3494         }
3495         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3496
3497         return QLA_FUNCTION_FAILED;
3498 }
3499
3500 static void
3501 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3502 {
3503         struct srb_iocb *aio = &sp->u.iocb_cmd;
3504         scsi_qla_host_t *vha = sp->vha;
3505         struct req_que *req = sp->qpair->req;
3506
3507         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3508         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3509         abt_iocb->entry_count = 1;
3510         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3511         if (sp->fcport) {
3512                 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3513                 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3514                 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3515                 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3516         }
3517         abt_iocb->handle_to_abort =
3518             cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3519                                     aio->u.abt.cmd_hndl));
3520         abt_iocb->vp_index = vha->vp_idx;
3521         abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3522         /* Send the command to the firmware */
3523         wmb();
3524 }
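
/*
 * Note: MAKE_HANDLE() packs the request-queue id into the upper 16 bits
 * of the 32-bit handle. Worked example (illustrative values): req->id =
 * 1 and sp->handle = 0x2a produce 0x0001002a; handle_to_abort uses the
 * same packing so the firmware can locate the command to abort.
 */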
3525
3526 static void
3527 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3528 {
3529         int i, sz;
3530
3531         mbx->entry_type = MBX_IOCB_TYPE;
3532         mbx->handle = sp->handle;
3533         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3534
3535         for (i = 0; i < sz; i++)
3536                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3537 }
3538
3539 static void
3540 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3541 {
3542         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3543         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3544         ct_pkt->handle = sp->handle;
3545 }
3546
3547 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3548         struct nack_to_isp *nack)
3549 {
3550         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3551
3552         nack->entry_type = NOTIFY_ACK_TYPE;
3553         nack->entry_count = 1;
3554         nack->ox_id = ntfy->ox_id;
3555
3556         nack->u.isp24.handle = sp->handle;
3557         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3558         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3559                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3560                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3561         }
3562         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3563         nack->u.isp24.status = ntfy->u.isp24.status;
3564         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3565         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3566         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3567         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3568         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3569         nack->u.isp24.srr_flags = 0;
3570         nack->u.isp24.srr_reject_code = 0;
3571         nack->u.isp24.srr_reject_code_expl = 0;
3572         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3573 }
3574
3575 /*
3576  * Build NVME LS request
3577  */
3578 static int
3579 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3580 {
3581         struct srb_iocb *nvme;
3582         int     rval = QLA_SUCCESS;
3583
3584         nvme = &sp->u.iocb_cmd;
3585         cmd_pkt->entry_type = PT_LS4_REQUEST;
3586         cmd_pkt->entry_count = 1;
3587         cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3588
3589         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3590         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3591         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3592
3593         cmd_pkt->tx_dseg_count = 1;
3594         cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3595         cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3596         put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3597
3598         cmd_pkt->rx_dseg_count = 1;
3599         cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3600         cmd_pkt->dsd[1].length  = nvme->u.nvme.rsp_len;
3601         put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3602
3603         return rval;
3604 }
3605
3606 static void
3607 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3608 {
3609         int map, pos;
3610
3611         vce->entry_type = VP_CTRL_IOCB_TYPE;
3612         vce->handle = sp->handle;
3613         vce->entry_count = 1;
3614         vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3615         vce->vp_count = cpu_to_le16(1);
3616
3617         /*
3618          * The index map in firmware starts with 1, so decrement the
3619          * index; this is OK as index 0 is never used.
3620          */
3621         map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3622         pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3623         vce->vp_idx_map[map] |= 1 << pos;
3624 }
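
/*
 * Worked example of the bitmap math above (illustrative index):
 * vp_index = 10 gives map = (10 - 1) / 8 = 1 and pos = (10 - 1) & 7 = 1,
 * so bit 1 of vce->vp_idx_map[1] is set.
 */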
3625
3626 static void
3627 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3628 {
3629         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3630         logio->control_flags =
3631             cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3632
3633         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3634         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3635         logio->port_id[1] = sp->fcport->d_id.b.area;
3636         logio->port_id[2] = sp->fcport->d_id.b.domain;
3637         logio->vp_index = sp->fcport->vha->vp_idx;
3638 }
3639
3640 int
3641 qla2x00_start_sp(srb_t *sp)
3642 {
3643         int rval = QLA_SUCCESS;
3644         scsi_qla_host_t *vha = sp->vha;
3645         struct qla_hw_data *ha = vha->hw;
3646         struct qla_qpair *qp = sp->qpair;
3647         void *pkt;
3648         unsigned long flags;
3649
3650         spin_lock_irqsave(qp->qp_lock_ptr, flags);
3651         pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3652         if (!pkt) {
3653                 rval = EAGAIN;
3654                 ql_log(ql_log_warn, vha, 0x700c,
3655                     "qla2x00_alloc_iocbs failed.\n");
3656                 goto done;
3657         }
3658
3659         switch (sp->type) {
3660         case SRB_LOGIN_CMD:
3661                 IS_FWI2_CAPABLE(ha) ?
3662                     qla24xx_login_iocb(sp, pkt) :
3663                     qla2x00_login_iocb(sp, pkt);
3664                 break;
3665         case SRB_PRLI_CMD:
3666                 qla24xx_prli_iocb(sp, pkt);
3667                 break;
3668         case SRB_LOGOUT_CMD:
3669                 IS_FWI2_CAPABLE(ha) ?
3670                     qla24xx_logout_iocb(sp, pkt) :
3671                     qla2x00_logout_iocb(sp, pkt);
3672                 break;
3673         case SRB_ELS_CMD_RPT:
3674         case SRB_ELS_CMD_HST:
3675                 qla24xx_els_iocb(sp, pkt);
3676                 break;
3677         case SRB_CT_CMD:
3678                 IS_FWI2_CAPABLE(ha) ?
3679                     qla24xx_ct_iocb(sp, pkt) :
3680                     qla2x00_ct_iocb(sp, pkt);
3681                 break;
3682         case SRB_ADISC_CMD:
3683                 IS_FWI2_CAPABLE(ha) ?
3684                     qla24xx_adisc_iocb(sp, pkt) :
3685                     qla2x00_adisc_iocb(sp, pkt);
3686                 break;
3687         case SRB_TM_CMD:
3688                 IS_QLAFX00(ha) ?
3689                     qlafx00_tm_iocb(sp, pkt) :
3690                     qla24xx_tm_iocb(sp, pkt);
3691                 break;
3692         case SRB_FXIOCB_DCMD:
3693         case SRB_FXIOCB_BCMD:
3694                 qlafx00_fxdisc_iocb(sp, pkt);
3695                 break;
3696         case SRB_NVME_LS:
3697                 qla_nvme_ls(sp, pkt);
3698                 break;
3699         case SRB_ABT_CMD:
3700                 IS_QLAFX00(ha) ?
3701                         qlafx00_abort_iocb(sp, pkt) :
3702                         qla24xx_abort_iocb(sp, pkt);
3703                 break;
3704         case SRB_ELS_DCMD:
3705                 qla24xx_els_logo_iocb(sp, pkt);
3706                 break;
3707         case SRB_CT_PTHRU_CMD:
3708                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3709                 break;
3710         case SRB_MB_IOCB:
3711                 qla2x00_mb_iocb(sp, pkt);
3712                 break;
3713         case SRB_NACK_PLOGI:
3714         case SRB_NACK_PRLI:
3715         case SRB_NACK_LOGO:
3716                 qla2x00_send_notify_ack_iocb(sp, pkt);
3717                 break;
3718         case SRB_CTRL_VP:
3719                 qla25xx_ctrlvp_iocb(sp, pkt);
3720                 break;
3721         case SRB_PRLO_CMD:
3722                 qla24xx_prlo_iocb(sp, pkt);
3723                 break;
3724         default:
3725                 break;
3726         }
3727
3728         if (sp->start_timer)
3729                 add_timer(&sp->u.iocb_cmd.timer);
3730
3731         wmb();
3732         qla2x00_start_iocbs(vha, qp->req);
3733 done:
3734         spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3735         return rval;
3736 }
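
/*
 * Minimal caller sketch (assumed context; allocation checks trimmed and
 * my_prlo_done is a hypothetical completion callback): the SRB type set
 * before calling qla2x00_start_sp() selects the IOCB builder in the
 * switch above.
 *
 *         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *         sp->type = SRB_PRLO_CMD;
 *         sp->name = "prlo";
 *         sp->done = my_prlo_done;
 *         rval = qla2x00_start_sp(sp);
 */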
3737
3738 static void
3739 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3740                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3741 {
3742         uint16_t avail_dsds;
3743         struct dsd64 *cur_dsd;
3744         uint32_t req_data_len = 0;
3745         uint32_t rsp_data_len = 0;
3746         struct scatterlist *sg;
3747         int index;
3748         int entry_count = 1;
3749         struct bsg_job *bsg_job = sp->u.bsg_job;
3750
3751         /* Update entry type to indicate a bidir command. */
3752         put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3753
3754         /* Set the transfer direction; for a bidir command both flags
3755          * are set. Also set the BD_WRAP_BACK flag: firmware takes care
3756          * of assigning DID=SID for outgoing pkts.
3757          */
3758         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3759         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3760         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3761                                                         BD_WRAP_BACK);
3762
3763         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3764         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3765         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3766         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3767
3768         vha->bidi_stats.transfer_bytes += req_data_len;
3769         vha->bidi_stats.io_count++;
3770
3771         vha->qla_stats.output_bytes += req_data_len;
3772         vha->qla_stats.output_requests++;
3773
3774         /* Only one DSD is available in the bidirectional IOCB; the
3775          * remaining DSDs are bundled in continuation IOCBs.
3776          */
3777         avail_dsds = 1;
3778         cur_dsd = &cmd_pkt->fcp_dsd;
3779
3780         index = 0;
3781
3782         for_each_sg(bsg_job->request_payload.sg_list, sg,
3783                                 bsg_job->request_payload.sg_cnt, index) {
3784                 cont_a64_entry_t *cont_pkt;
3785
3786                 /* Allocate additional continuation packets */
3787                 if (avail_dsds == 0) {
3788                         /* Continuation Type 1 IOCB can accommodate
3789                          * 5 DSDs.
3790                          */
3791                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3792                         cur_dsd = cont_pkt->dsd;
3793                         avail_dsds = 5;
3794                         entry_count++;
3795                 }
3796                 append_dsd64(&cur_dsd, sg);
3797                 avail_dsds--;
3798         }
3799         /* The read-request DSDs always go to a continuation IOCB and
3800          * follow the write DSDs. If there is room on the current IOCB
3801          * a DSD is added there, else a new continuation IOCB is
3802          * allocated.
3803          */
3804         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3805                                 bsg_job->reply_payload.sg_cnt, index) {
3806                 cont_a64_entry_t *cont_pkt;
3807
3808                 /* Allocate additional continuation packets */
3809                 if (avail_dsds == 0) {
3810                         /* Continuation Type 1 IOCB can accommodate
3811                          * 5 DSDs.
3812                          */
3813                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3814                         cur_dsd = cont_pkt->dsd;
3815                         avail_dsds = 5;
3816                         entry_count++;
3817                 }
3818                 append_dsd64(&cur_dsd, sg);
3819                 avail_dsds--;
3820         }
3821         /* This value should equal the number of IOCBs required for this cmd */
3822         cmd_pkt->entry_count = entry_count;
3823 }
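
/*
 * Worked example (illustrative counts): with request sg_cnt = 3 and
 * reply sg_cnt = 4, the embedded fcp_dsd takes the first write segment
 * and the remaining 2 + 4 segments flow into two Continuation Type 1
 * IOCBs (5 DSDs each), so entry_count ends up as 3.
 */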
3824
3825 int
3826 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3827 {
3828
3829         struct qla_hw_data *ha = vha->hw;
3830         unsigned long flags;
3831         uint32_t handle;
3832         uint16_t req_cnt;
3833         uint16_t cnt;
3834         uint32_t *clr_ptr;
3835         struct cmd_bidir *cmd_pkt = NULL;
3836         struct rsp_que *rsp;
3837         struct req_que *req;
3838         int rval = EXT_STATUS_OK;
3839
3840         /* rval carries EXT_STATUS_* codes back to the caller. */
3841
3842         rsp = ha->rsp_q_map[0];
3843         req = vha->req;
3844
3845         /* Send marker if required */
3846         if (vha->marker_needed != 0) {
3847                 if (qla2x00_marker(vha, ha->base_qpair,
3848                         0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3849                         return EXT_STATUS_MAILBOX;
3850                 vha->marker_needed = 0;
3851         }
3852
3853         /* Acquire ring specific lock */
3854         spin_lock_irqsave(&ha->hardware_lock, flags);
3855
3856         handle = qla2xxx_get_next_handle(req);
3857         if (handle == 0) {
3858                 rval = EXT_STATUS_BUSY;
3859                 goto queuing_error;
3860         }
3861
3862         /* Calculate the number of IOCBs required */
3863         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3864
3865         /* Check for room on request queue. */
3866         if (req->cnt < req_cnt + 2) {
3867                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3868                     RD_REG_DWORD_RELAXED(req->req_q_out);
3869                 if  (req->ring_index < cnt)
3870                         req->cnt = cnt - req->ring_index;
3871                 else
3872                         req->cnt = req->length -
3873                                 (req->ring_index - cnt);
3874         }
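
        /*
         * Worked example of the free-slot math above (illustrative
         * values): req->length = 512, req->ring_index = 100 and an out
         * pointer cnt = 90 give req->cnt = 512 - (100 - 90) = 502.
         */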
3875         if (req->cnt < req_cnt + 2) {
3876                 rval = EXT_STATUS_BUSY;
3877                 goto queuing_error;
3878         }
3879
3880         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3881         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3882
3883         /* Zero out remaining portion of packet. */
3884         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3885         clr_ptr = (uint32_t *)cmd_pkt + 2;
3886         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3887
3888         /* Set NPORT-ID  (of vha)*/
3889         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3890         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3891         cmd_pkt->port_id[1] = vha->d_id.b.area;
3892         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3893
3894         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3895         cmd_pkt->entry_status = (uint8_t) rsp->id;
3896         /* Build command packet. */
3897         req->current_outstanding_cmd = handle;
3898         req->outstanding_cmds[handle] = sp;
3899         sp->handle = handle;
3900         req->cnt -= req_cnt;
3901
3902         /* Send the command to the firmware */
3903         wmb();
3904         qla2x00_start_iocbs(vha, req);
3905 queuing_error:
3906         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3907         return rval;
3908 }