/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}
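
/*
 * Any other direction (DMA_NONE, DMA_BIDIRECTIONAL) falls through with
 * cflags left at 0, i.e. neither CF_READ nor CF_WRITE is set in the
 * IOCB control flags.
 */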

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
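
/*
 * Worked example: with 10 data segments, the base Command Type 2 IOCB
 * holds 3 DSDs and one Continuation Type 0 IOCB holds the remaining 7,
 * so qla2x00_calc_iocbs_32(10) returns 2.
 */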

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
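
/*
 * Worked example: with 10 data segments, the base Command Type 3 IOCB
 * holds 2 DSDs and each Continuation Type 1 IOCB holds 5 more, so
 * qla2x00_calc_iocbs_64(10) returns 1 + 8/5 + 1 (for the 3-DSD
 * remainder) = 3.
 */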

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
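
/*
 * For example, a SCSI_PROT_WRITE_PASS command on a host whose DIX guard
 * includes SHOST_DIX_GUARD_IP resolves to PO_MODE_DIF_TCP_CKSUM, while
 * the same command with a CRC guard resolves to PO_MODE_DIF_PASS; in
 * either case the protection scatter-gather count is returned so the
 * caller knows how many protection DSDs to build.
 */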

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
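
/*
 * DSD layout sketch for tot_dsds = 12: the first 3 address/length pairs
 * land in the Command Type 2 IOCB itself, the next 7 in a Continuation
 * Type 0 IOCB, and the final 2 in a second continuation, matching the
 * three IOCBs computed by qla2x00_calc_iocbs_32(12).
 */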

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
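
/*
 * Note that each 64-bit DSD above occupies three little-endian dwords
 * (address low, address high, length), which is why only 2 fit in the
 * Command Type 3 IOCB and 5 in each Continuation Type 1 IOCB, versus
 * 3 and 7 for the two-dword DSDs of the 32-bit variant.
 */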

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
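
/*
 * The head-room arithmetic above is standard ring accounting: when the
 * controller's out pointer has moved past ring_index the free space is
 * (cnt - ring_index), otherwise it is length - (ring_index - cnt).
 * For example, with length = 128, ring_index = 120 and cnt = 8, there
 * are 128 - 112 = 16 free request entries.
 */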

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
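
/*
 * Worked example (assuming QLA_DSDS_PER_IOCB is 37, as defined in
 * qla_def.h): 80 data segment descriptors need 80/37 = 2 full DSD
 * lists plus a third for the 6-descriptor remainder, so
 * qla24xx_calc_dsd_lists(80) returns 3.
 */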

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}
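
/*
 * For example, a Type 1 protected command addressing LBA 0x12345678
 * gets ref_tag = 0x12345678 with a zeroed app tag; if HBA error
 * checking is enabled, all four ref_tag_mask bytes are set so the
 * firmware validates every byte of the reference tag.
 */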

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
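
/*
 * Example walk, assuming a 512-byte protection interval: an SG element
 * of 1280 bytes yields two full 512-byte blocks (*partial == 0) and a
 * trailing 256-byte fragment (*partial == 1) whose length is carried in
 * tot_partial until a later SG element completes the block.
 */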

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
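
/*
 * Each DSD written above is 12 bytes (address low, address high,
 * length), and dsd_list_len reserves (avail_dsds + 1) slots; the extra
 * slot leaves room for the entry that chains to the next dma_pool list
 * or for the zeroed DSD that terminates the chain for the firmware.
 */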

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                    Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection information
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                total_bytes = data_bytes;
                data_bytes += dif_bytes;
                break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                total_bytes = data_bytes + dif_bytes;
                break;
        default:
                BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        /* Walks data segments */
        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                        cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
                        (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
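
/*
 * Byte-count example: for a 4096-byte transfer on a 512-byte-sector
 * device, dif_bytes = (4096 / 512) * 8 = 64.  When protection data
 * travels on the wire (the STRIP/INSERT/PASS cases grouped above,
 * e.g. SCSI_PROT_READ_PASS), total_bytes = 4096 + 64 = 4160; for
 * SCSI_PROT_READ_INSERT and SCSI_PROT_WRITE_STRIP the wire carries
 * only the 4096 data bytes and data_bytes grows to 4160 instead.
 */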

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
1406 int
1407 qla24xx_start_scsi(srb_t *sp)
1408 {
1409         int             nseg;
1410         unsigned long   flags;
1411         uint32_t        *clr_ptr;
1412         uint32_t        index;
1413         uint32_t        handle;
1414         struct cmd_type_7 *cmd_pkt;
1415         uint16_t        cnt;
1416         uint16_t        req_cnt;
1417         uint16_t        tot_dsds;
1418         struct req_que *req = NULL;
1419         struct rsp_que *rsp = NULL;
1420         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1421         struct scsi_qla_host *vha = sp->vha;
1422         struct qla_hw_data *ha = vha->hw;
1423
1424         /* Setup device pointers. */
1425         req = vha->req;
1426         rsp = req->rsp;
1427
1428         /* So we know we haven't pci_map'ed anything yet */
1429         tot_dsds = 0;
1430
1431         /* Send marker if required */
1432         if (vha->marker_needed != 0) {
1433                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1434                     QLA_SUCCESS)
1435                         return QLA_FUNCTION_FAILED;
1436                 vha->marker_needed = 0;
1437         }
1438
1439         /* Acquire ring specific lock */
1440         spin_lock_irqsave(&ha->hardware_lock, flags);
1441
1442         /* Check for room in outstanding command list. */
1443         handle = req->current_outstanding_cmd;
1444         for (index = 1; index < req->num_outstanding_cmds; index++) {
1445                 handle++;
1446                 if (handle == req->num_outstanding_cmds)
1447                         handle = 1;
1448                 if (!req->outstanding_cmds[handle])
1449                         break;
1450         }
1451         if (index == req->num_outstanding_cmds)
1452                 goto queuing_error;
1453
1454         /* Map the sg table so we have an accurate count of sg entries needed */
1455         if (scsi_sg_count(cmd)) {
1456                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1457                     scsi_sg_count(cmd), cmd->sc_data_direction);
1458                 if (unlikely(!nseg))
1459                         goto queuing_error;
1460         } else
1461                 nseg = 0;
1462
1463         tot_dsds = nseg;
1464         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1465         if (req->cnt < (req_cnt + 2)) {
1466                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1467                     RD_REG_DWORD_RELAXED(req->req_q_out);
1468                 if (req->ring_index < cnt)
1469                         req->cnt = cnt - req->ring_index;
1470                 else
1471                         req->cnt = req->length -
1472                                 (req->ring_index - cnt);
1473                 if (req->cnt < (req_cnt + 2))
1474                         goto queuing_error;
1475         }
1476
1477         /* Build command packet. */
1478         req->current_outstanding_cmd = handle;
1479         req->outstanding_cmds[handle] = sp;
1480         sp->handle = handle;
1481         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1482         req->cnt -= req_cnt;
1483
1484         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1485         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1486
1487         /* Zero out remaining portion of packet. */
1488         /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
1489         clr_ptr = (uint32_t *)cmd_pkt + 2;
1490         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1491         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1492
1493         /* Set NPORT-ID and LUN number */
1494         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1495         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1496         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1497         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1498         cmd_pkt->vp_index = sp->vha->vp_idx;
1499
1500         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1501         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1502
1503         cmd_pkt->task = TSK_SIMPLE;
1504
1505         /* Load SCSI command packet. */
1506         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1507         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1508
1509         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1510
1511         /* Build IOCB segments */
1512         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1513
1514         /* Set total data segment count. */
1515         cmd_pkt->entry_count = (uint8_t)req_cnt;
1516         wmb();
1517         /* Adjust ring index. */
1518         req->ring_index++;
1519         if (req->ring_index == req->length) {
1520                 req->ring_index = 0;
1521                 req->ring_ptr = req->ring;
1522         } else
1523                 req->ring_ptr++;
1524
1525         sp->flags |= SRB_DMA_VALID;
1526
1527         /* Set chip new ring index. */
1528         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1529         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1530
1531         /* Manage unprocessed RIO/ZIO commands in response queue. */
1532         if (vha->flags.process_response_queue &&
1533                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1534                 qla24xx_process_response_queue(vha, rsp);
1535
1536         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537         return QLA_SUCCESS;
1538
1539 queuing_error:
1540         if (tot_dsds)
1541                 scsi_dma_unmap(cmd);
1542
1543         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1544
1545         return QLA_FUNCTION_FAILED;
1546 }
1547
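/*
 * Editor's sketch of the handle search used above (illustrative only):
 * the outstanding-command array is scanned round-robin, starting just
 * past the last handle handed out and wrapping to 1, since handle 0 is
 * reserved as "invalid":
 *
 *	static uint32_t find_free_handle(srb_t **cmds, uint32_t num,
 *					 uint32_t cur)
 *	{
 *		uint32_t i, h = cur;
 *
 *		for (i = 1; i < num; i++) {
 *			if (++h == num)
 *				h = 1;
 *			if (!cmds[h])
 *				return h;	/* free slot found */
 *		}
 *		return 0;			/* array exhausted */
 *	}
 */
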
1548 /**
1549  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1550  * @sp: command to send to the ISP
1551  *
1552  * Returns non-zero if a failure occurred, else zero.
1553  */
1554 int
1555 qla24xx_dif_start_scsi(srb_t *sp)
1556 {
1557         int                     nseg;
1558         unsigned long           flags;
1559         uint32_t                *clr_ptr;
1560         uint32_t                index;
1561         uint32_t                handle;
1562         uint16_t                cnt;
1563         uint16_t                req_cnt = 0;
1564         uint16_t                tot_dsds;
1565         uint16_t                tot_prot_dsds;
1566         uint16_t                fw_prot_opts = 0;
1567         struct req_que          *req = NULL;
1568         struct rsp_que          *rsp = NULL;
1569         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1570         struct scsi_qla_host    *vha = sp->vha;
1571         struct qla_hw_data      *ha = vha->hw;
1572         struct cmd_type_crc_2   *cmd_pkt;
1573         uint32_t                status = 0;
1574
1575 #define QDSS_GOT_Q_SPACE        BIT_0
1576
1577         /* Only process protection commands or CDBs longer than 16 bytes here */
1578         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1579                 if (cmd->cmd_len <= 16)
1580                         return qla24xx_start_scsi(sp);
1581         }
1582
1583         /* Setup device pointers. */
1584         req = vha->req;
1585         rsp = req->rsp;
1586
1587         /* So we know we haven't pci_map'ed anything yet */
1588         tot_dsds = 0;
1589
1590         /* Send marker if required */
1591         if (vha->marker_needed != 0) {
1592                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1593                     QLA_SUCCESS)
1594                         return QLA_FUNCTION_FAILED;
1595                 vha->marker_needed = 0;
1596         }
1597
1598         /* Acquire ring specific lock */
1599         spin_lock_irqsave(&ha->hardware_lock, flags);
1600
1601         /* Check for room in outstanding command list. */
1602         handle = req->current_outstanding_cmd;
1603         for (index = 1; index < req->num_outstanding_cmds; index++) {
1604                 handle++;
1605                 if (handle == req->num_outstanding_cmds)
1606                         handle = 1;
1607                 if (!req->outstanding_cmds[handle])
1608                         break;
1609         }
1610
1611         if (index == req->num_outstanding_cmds)
1612                 goto queuing_error;
1613
1614         /* Compute number of required data segments */
1615         /* Map the sg table so we have an accurate count of sg entries needed */
1616         if (scsi_sg_count(cmd)) {
1617                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1618                     scsi_sg_count(cmd), cmd->sc_data_direction);
1619                 if (unlikely(!nseg))
1620                         goto queuing_error;
1621                 else
1622                         sp->flags |= SRB_DMA_VALID;
1623
1624                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1625                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1626                         struct qla2_sgx sgx;
1627                         uint32_t        partial;
1628
1629                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1630                         sgx.tot_bytes = scsi_bufflen(cmd);
1631                         sgx.cur_sg = scsi_sglist(cmd);
1632                         sgx.sp = sp;
1633
1634                         nseg = 0;
1635                         while (qla24xx_get_one_block_sg(
1636                             cmd->device->sector_size, &sgx, &partial))
1637                                 nseg++;
1638                 }
1639         } else
1640                 nseg = 0;
1641
1642         /* number of required data segments */
1643         tot_dsds = nseg;
1644
1645         /* Compute number of required protection segments */
1646         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1647                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1648                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1649                 if (unlikely(!nseg))
1650                         goto queuing_error;
1651                 else
1652                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1653
1654                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1655                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1656                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1657                 }
1658         } else {
1659                 nseg = 0;
1660         }
1661
1662         req_cnt = 1;
1663         /* Total Data and protection sg segment(s) */
1664         tot_prot_dsds = nseg;
1665         tot_dsds += nseg;
1666         if (req->cnt < (req_cnt + 2)) {
1667                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1668                     RD_REG_DWORD_RELAXED(req->req_q_out);
1669                 if (req->ring_index < cnt)
1670                         req->cnt = cnt - req->ring_index;
1671                 else
1672                         req->cnt = req->length -
1673                                 (req->ring_index - cnt);
1674                 if (req->cnt < (req_cnt + 2))
1675                         goto queuing_error;
1676         }
1677
1678         status |= QDSS_GOT_Q_SPACE;
1679
1680         /* Build header part of command packet (excluding the OPCODE). */
1681         req->current_outstanding_cmd = handle;
1682         req->outstanding_cmds[handle] = sp;
1683         sp->handle = handle;
1684         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1685         req->cnt -= req_cnt;
1686
1687         /* Fill in common area */
1688         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1689         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1690
1691         clr_ptr = (uint32_t *)cmd_pkt + 2;
1692         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1693
1694         /* Set NPORT-ID and LUN number */
1695         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1696         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1697         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1698         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1699
1700         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1701         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1702
1703         /* Total Data and protection segment(s) */
1704         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1705
1706         /* Build IOCB segments and adjust for data protection segments */
1707         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1708             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1709                 QLA_SUCCESS)
1710                 goto queuing_error;
1711
1712         cmd_pkt->entry_count = (uint8_t)req_cnt;
1713         /* Specify response queue number where completion should happen */
1714         cmd_pkt->entry_status = (uint8_t) rsp->id;
1715         cmd_pkt->timeout = cpu_to_le16(0);
1716         wmb();
1717
1718         /* Adjust ring index. */
1719         req->ring_index++;
1720         if (req->ring_index == req->length) {
1721                 req->ring_index = 0;
1722                 req->ring_ptr = req->ring;
1723         } else
1724                 req->ring_ptr++;
1725
1726         /* Set chip new ring index. */
1727         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1728         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1729
1730         /* Manage unprocessed RIO/ZIO commands in response queue. */
1731         if (vha->flags.process_response_queue &&
1732             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1733                 qla24xx_process_response_queue(vha, rsp);
1734
1735         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1736
1737         return QLA_SUCCESS;
1738
1739 queuing_error:
1740         if (status & QDSS_GOT_Q_SPACE) {
1741                 req->outstanding_cmds[handle] = NULL;
1742                 req->cnt += req_cnt;
1743         }
1744         /* Cleanup will be performed by the caller (queuecommand) */
1745
1746         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1747         return QLA_FUNCTION_FAILED;
1748 }
1749
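/*
 * Editor's note (illustrative arithmetic): for SCSI_PROT_READ_INSERT and
 * SCSI_PROT_WRITE_STRIP the firmware inserts or strips the 8-byte DIF
 * tuple itself, so the data side must be described one logical block per
 * DSD.  That is why nseg is recounted block-by-block above, which for a
 * contiguous buffer collapses to the division also used for the
 * protection side:
 *
 *	nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
 *
 * e.g. a 64 KiB transfer on 512-byte sectors needs 128 data DSDs rather
 * than the handful an unprotected command would use.
 */
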
1750 /**
1751  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1752  * @sp: command to send to the ISP
1753  *
1754  * Returns non-zero if a failure occurred, else zero.
1755  */
1756 static int
1757 qla2xxx_start_scsi_mq(srb_t *sp)
1758 {
1759         int             nseg;
1760         unsigned long   flags;
1761         uint32_t        *clr_ptr;
1762         uint32_t        index;
1763         uint32_t        handle;
1764         struct cmd_type_7 *cmd_pkt;
1765         uint16_t        cnt;
1766         uint16_t        req_cnt;
1767         uint16_t        tot_dsds;
1768         struct req_que *req = NULL;
1769         struct rsp_que *rsp = NULL;
1770         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1771         struct scsi_qla_host *vha = sp->fcport->vha;
1772         struct qla_hw_data *ha = vha->hw;
1773         struct qla_qpair *qpair = sp->qpair;
1774
1775         /* Acquire qpair specific lock */
1776         spin_lock_irqsave(&qpair->qp_lock, flags);
1777
1778         /* Setup qpair pointers */
1779         rsp = qpair->rsp;
1780         req = qpair->req;
1781
1782         /* So we know we haven't pci_map'ed anything yet */
1783         tot_dsds = 0;
1784
1785         /* Send marker if required */
1786         if (vha->marker_needed != 0) {
1787                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1788                     QLA_SUCCESS) {
1789                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1790                         return QLA_FUNCTION_FAILED;
1791                 }
1792                 vha->marker_needed = 0;
1793         }
1794
1795         /* Check for room in outstanding command list. */
1796         handle = req->current_outstanding_cmd;
1797         for (index = 1; index < req->num_outstanding_cmds; index++) {
1798                 handle++;
1799                 if (handle == req->num_outstanding_cmds)
1800                         handle = 1;
1801                 if (!req->outstanding_cmds[handle])
1802                         break;
1803         }
1804         if (index == req->num_outstanding_cmds)
1805                 goto queuing_error;
1806
1807         /* Map the sg table so we have an accurate count of sg entries needed */
1808         if (scsi_sg_count(cmd)) {
1809                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1810                     scsi_sg_count(cmd), cmd->sc_data_direction);
1811                 if (unlikely(!nseg))
1812                         goto queuing_error;
1813         } else
1814                 nseg = 0;
1815
1816         tot_dsds = nseg;
1817         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1818         if (req->cnt < (req_cnt + 2)) {
1819                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1820                     RD_REG_DWORD_RELAXED(req->req_q_out);
1821                 if (req->ring_index < cnt)
1822                         req->cnt = cnt - req->ring_index;
1823                 else
1824                         req->cnt = req->length -
1825                                 (req->ring_index - cnt);
1826                 if (req->cnt < (req_cnt + 2))
1827                         goto queuing_error;
1828         }
1829
1830         /* Build command packet. */
1831         req->current_outstanding_cmd = handle;
1832         req->outstanding_cmds[handle] = sp;
1833         sp->handle = handle;
1834         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1835         req->cnt -= req_cnt;
1836
1837         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1838         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1839
1840         /* Zero out remaining portion of packet. */
1841         /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
1842         clr_ptr = (uint32_t *)cmd_pkt + 2;
1843         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1844         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1845
1846         /* Set NPORT-ID and LUN number */
1847         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1848         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1849         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1850         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1851         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1852
1853         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1854         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1855
1856         cmd_pkt->task = TSK_SIMPLE;
1857
1858         /* Load SCSI command packet. */
1859         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1860         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1861
1862         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1863
1864         /* Build IOCB segments */
1865         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1866
1867         /* Set total data segment count. */
1868         cmd_pkt->entry_count = (uint8_t)req_cnt;
1869         wmb();
1870         /* Adjust ring index. */
1871         req->ring_index++;
1872         if (req->ring_index == req->length) {
1873                 req->ring_index = 0;
1874                 req->ring_ptr = req->ring;
1875         } else
1876                 req->ring_ptr++;
1877
1878         sp->flags |= SRB_DMA_VALID;
1879
1880         /* Set chip new ring index. */
1881         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1882
1883         /* Manage unprocessed RIO/ZIO commands in response queue. */
1884         if (vha->flags.process_response_queue &&
1885                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1886                 qla24xx_process_response_queue(vha, rsp);
1887
1888         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1889         return QLA_SUCCESS;
1890
1891 queuing_error:
1892         if (tot_dsds)
1893                 scsi_dma_unmap(cmd);
1894
1895         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1896
1897         return QLA_FUNCTION_FAILED;
1898 }
1899
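/*
 * Editor's note: assuming the usual qla_def.h definition, MAKE_HANDLE()
 * packs the request-queue id into the upper half of the 32-bit IOCB
 * handle so the completion path can locate the owning queue:
 *
 *	#define MAKE_HANDLE(q, h) \
 *		((uint32_t)((((uint32_t)(q)) << 16) | (uint32_t)(h)))
 *
 * The low 16 bits remain the index into req->outstanding_cmds[].
 */
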
1900
1901 /**
1902  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1903  * @sp: command to send to the ISP
1904  *
1905  * Returns non-zero if a failure occurred, else zero.
1906  */
1907 int
1908 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1909 {
1910         int                     nseg;
1911         unsigned long           flags;
1912         uint32_t                *clr_ptr;
1913         uint32_t                index;
1914         uint32_t                handle;
1915         uint16_t                cnt;
1916         uint16_t                req_cnt = 0;
1917         uint16_t                tot_dsds;
1918         uint16_t                tot_prot_dsds;
1919         uint16_t                fw_prot_opts = 0;
1920         struct req_que          *req = NULL;
1921         struct rsp_que          *rsp = NULL;
1922         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1923         struct scsi_qla_host    *vha = sp->fcport->vha;
1924         struct qla_hw_data      *ha = vha->hw;
1925         struct cmd_type_crc_2   *cmd_pkt;
1926         uint32_t                status = 0;
1927         struct qla_qpair        *qpair = sp->qpair;
1928
1929 #define QDSS_GOT_Q_SPACE        BIT_0
1930
1931         /* Check for host side state */
1932         if (!qpair->online) {
1933                 cmd->result = DID_NO_CONNECT << 16;
1934                 return QLA_INTERFACE_ERROR;
1935         }
1936
1937         if (!qpair->difdix_supported &&
1938                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1939                 cmd->result = DID_NO_CONNECT << 16;
1940                 return QLA_INTERFACE_ERROR;
1941         }
1942
1943         /* Only process protection commands or CDBs longer than 16 bytes here */
1944         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1945                 if (cmd->cmd_len <= 16)
1946                         return qla2xxx_start_scsi_mq(sp);
1947         }
1948
1949         spin_lock_irqsave(&qpair->qp_lock, flags);
1950
1951         /* Setup qpair pointers */
1952         rsp = qpair->rsp;
1953         req = qpair->req;
1954
1955         /* So we know we haven't pci_map'ed anything yet */
1956         tot_dsds = 0;
1957
1958         /* Send marker if required */
1959         if (vha->marker_needed != 0) {
1960                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1961                     QLA_SUCCESS) {
1962                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1963                         return QLA_FUNCTION_FAILED;
1964                 }
1965                 vha->marker_needed = 0;
1966         }
1967
1968         /* Check for room in outstanding command list. */
1969         handle = req->current_outstanding_cmd;
1970         for (index = 1; index < req->num_outstanding_cmds; index++) {
1971                 handle++;
1972                 if (handle == req->num_outstanding_cmds)
1973                         handle = 1;
1974                 if (!req->outstanding_cmds[handle])
1975                         break;
1976         }
1977
1978         if (index == req->num_outstanding_cmds)
1979                 goto queuing_error;
1980
1981         /* Compute number of required data segments */
1982         /* Map the sg table so we have an accurate count of sg entries needed */
1983         if (scsi_sg_count(cmd)) {
1984                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1985                     scsi_sg_count(cmd), cmd->sc_data_direction);
1986                 if (unlikely(!nseg))
1987                         goto queuing_error;
1988                 else
1989                         sp->flags |= SRB_DMA_VALID;
1990
1991                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1992                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1993                         struct qla2_sgx sgx;
1994                         uint32_t        partial;
1995
1996                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1997                         sgx.tot_bytes = scsi_bufflen(cmd);
1998                         sgx.cur_sg = scsi_sglist(cmd);
1999                         sgx.sp = sp;
2000
2001                         nseg = 0;
2002                         while (qla24xx_get_one_block_sg(
2003                             cmd->device->sector_size, &sgx, &partial))
2004                                 nseg++;
2005                 }
2006         } else
2007                 nseg = 0;
2008
2009         /* number of required data segments */
2010         tot_dsds = nseg;
2011
2012         /* Compute number of required protection segments */
2013         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2014                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2015                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2016                 if (unlikely(!nseg))
2017                         goto queuing_error;
2018                 else
2019                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2020
2021                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2022                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2023                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2024                 }
2025         } else {
2026                 nseg = 0;
2027         }
2028
2029         req_cnt = 1;
2030         /* Total Data and protection sg segment(s) */
2031         tot_prot_dsds = nseg;
2032         tot_dsds += nseg;
2033         if (req->cnt < (req_cnt + 2)) {
2034                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2035                     RD_REG_DWORD_RELAXED(req->req_q_out);
2036                 if (req->ring_index < cnt)
2037                         req->cnt = cnt - req->ring_index;
2038                 else
2039                         req->cnt = req->length -
2040                                 (req->ring_index - cnt);
2041                 if (req->cnt < (req_cnt + 2))
2042                         goto queuing_error;
2043         }
2044
2045         status |= QDSS_GOT_Q_SPACE;
2046
2047         /* Build header part of command packet (excluding the OPCODE). */
2048         req->current_outstanding_cmd = handle;
2049         req->outstanding_cmds[handle] = sp;
2050         sp->handle = handle;
2051         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2052         req->cnt -= req_cnt;
2053
2054         /* Fill in common area */
2055         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2056         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2057
2058         clr_ptr = (uint32_t *)cmd_pkt + 2;
2059         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2060
2061         /* Set NPORT-ID and LUN number */
2062         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2063         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2064         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2065         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2066
2067         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2068         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2069
2070         /* Total Data and protection segment(s) */
2071         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2072
2073         /* Build IOCB segments and adjust for data protection segments */
2074         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2075             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2076                 QLA_SUCCESS)
2077                 goto queuing_error;
2078
2079         cmd_pkt->entry_count = (uint8_t)req_cnt;
2080         cmd_pkt->timeout = cpu_to_le16(0);
2081         wmb();
2082
2083         /* Adjust ring index. */
2084         req->ring_index++;
2085         if (req->ring_index == req->length) {
2086                 req->ring_index = 0;
2087                 req->ring_ptr = req->ring;
2088         } else
2089                 req->ring_ptr++;
2090
2091         /* Set chip new ring index. */
2092         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2093
2094         /* Manage unprocessed RIO/ZIO commands in response queue. */
2095         if (vha->flags.process_response_queue &&
2096             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2097                 qla24xx_process_response_queue(vha, rsp);
2098
2099         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2100
2101         return QLA_SUCCESS;
2102
2103 queuing_error:
2104         if (status & QDSS_GOT_Q_SPACE) {
2105                 req->outstanding_cmds[handle] = NULL;
2106                 req->cnt += req_cnt;
2107         }
2108         /* Cleanup will be performed by the caller (queuecommand) */
2109
2110         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2111         return QLA_FUNCTION_FAILED;
2112 }
2113
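/*
 * Editor's sketch: host_to_fcp_swap(), used above on the LUN and CDB
 * fields, byte-swaps the buffer one 32-bit word at a time so the FCP
 * header reaches the wire big-endian regardless of host endianness.
 * A minimal equivalent (an assumption based on qla_inline.h):
 *
 *	static inline void host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
 *	{
 *		uint32_t *p = (uint32_t *)fcp;
 *		uint32_t iter = bsize >> 2;
 *
 *		for (; iter; iter--, p++)
 *			*p = swab32(*p);
 *	}
 */
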
2114 /* Generic Control-SRB manipulation functions. */
2115
2116 /* hardware_lock assumed to be held. */
2117
2118 void *
2119 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2120 {
2121         scsi_qla_host_t *vha = qpair->vha;
2122         struct qla_hw_data *ha = vha->hw;
2123         struct req_que *req = qpair->req;
2124         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2125         uint32_t index, handle;
2126         request_t *pkt;
2127         uint16_t cnt, req_cnt;
2128
2129         pkt = NULL;
2130         req_cnt = 1;
2131         handle = 0;
2132
2133         if (!sp)
2134                 goto skip_cmd_array;
2135
2136         /* Check for room in outstanding command list. */
2137         handle = req->current_outstanding_cmd;
2138         for (index = 1; index < req->num_outstanding_cmds; index++) {
2139                 handle++;
2140                 if (handle == req->num_outstanding_cmds)
2141                         handle = 1;
2142                 if (!req->outstanding_cmds[handle])
2143                         break;
2144         }
2145         if (index == req->num_outstanding_cmds) {
2146                 ql_log(ql_log_warn, vha, 0x700b,
2147                     "No room on outstanding cmd array.\n");
2148                 goto queuing_error;
2149         }
2150
2151         /* Prep command array. */
2152         req->current_outstanding_cmd = handle;
2153         req->outstanding_cmds[handle] = sp;
2154         sp->handle = handle;
2155
2156         /* Adjust entry-counts as needed. */
2157         if (sp->type != SRB_SCSI_CMD)
2158                 req_cnt = sp->iocbs;
2159
2160 skip_cmd_array:
2161         /* Check for room on request queue. */
2162         if (req->cnt < req_cnt + 2) {
2163                 if (qpair->use_shadow_reg)
2164                         cnt = *req->out_ptr;
2165                 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2166                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2167                 else if (IS_P3P_TYPE(ha))
2168                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2169                 else if (IS_FWI2_CAPABLE(ha))
2170                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2171                 else if (IS_QLAFX00(ha))
2172                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2173                 else
2174                         cnt = qla2x00_debounce_register(
2175                             ISP_REQ_Q_OUT(ha, &reg->isp));
2176
2177                 if  (req->ring_index < cnt)
2178                         req->cnt = cnt - req->ring_index;
2179                 else
2180                         req->cnt = req->length -
2181                             (req->ring_index - cnt);
2182         }
2183         if (req->cnt < req_cnt + 2)
2184                 goto queuing_error;
2185
2186         /* Prep packet */
2187         req->cnt -= req_cnt;
2188         pkt = req->ring_ptr;
2189         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2190         if (IS_QLAFX00(ha)) {
2191                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2192                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2193         } else {
2194                 pkt->entry_count = req_cnt;
2195                 pkt->handle = handle;
2196         }
2197
        return pkt;

2198 queuing_error:
2199         qpair->tgt_counters.num_alloc_iocb_failed++;
2200         return pkt;
2201 }
2202
2203 void *
2204 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2205 {
2206         scsi_qla_host_t *vha = qpair->vha;
2207
2208         if (qla2x00_reset_active(vha))
2209                 return NULL;
2210
2211         return __qla2x00_alloc_iocbs(qpair, sp);
2212 }
2213
2214 void *
2215 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2216 {
2217         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2218 }
2219
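/*
 * Editor's usage sketch (illustrative, under the hardware lock): callers
 * typically allocate an IOCB, fill it in, then ring the doorbell via
 * qla2x00_start_iocbs():
 *
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (!pkt)
 *		return QLA_FUNCTION_FAILED;
 *	... initialize *pkt ...
 *	qla2x00_start_iocbs(vha, vha->req);
 */
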
2220 static void
2221 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2222 {
2223         struct srb_iocb *lio = &sp->u.iocb_cmd;
2224
2225         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2226         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2227         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2228                 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2229
2230         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2231         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2232         logio->port_id[1] = sp->fcport->d_id.b.area;
2233         logio->port_id[2] = sp->fcport->d_id.b.domain;
2234         logio->vp_index = sp->vha->vp_idx;
2235 }
2236
2237 static void
2238 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2239 {
2240         struct srb_iocb *lio = &sp->u.iocb_cmd;
2241
2242         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2243         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2244
2245         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2246                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2247         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2248                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2249         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2250         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2251         logio->port_id[1] = sp->fcport->d_id.b.area;
2252         logio->port_id[2] = sp->fcport->d_id.b.domain;
2253         logio->vp_index = sp->vha->vp_idx;
2254 }
2255
2256 static void
2257 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2258 {
2259         struct qla_hw_data *ha = sp->vha->hw;
2260         struct srb_iocb *lio = &sp->u.iocb_cmd;
2261         uint16_t opts;
2262
2263         mbx->entry_type = MBX_IOCB_TYPE;
2264         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2265         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2266         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2267         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2268         if (HAS_EXTENDED_IDS(ha)) {
2269                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2270                 mbx->mb10 = cpu_to_le16(opts);
2271         } else {
2272                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2273         }
2274         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2275         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2276             sp->fcport->d_id.b.al_pa);
2277         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2278 }
2279
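/*
 * Editor's note on the HAS_EXTENDED_IDS() split above: with extended
 * loop ids the id occupies all of mb1 and the option bits move to mb10;
 * on older parts both share mb1, loop id in the high byte and options in
 * the low byte (illustrative):
 *
 *	mb1  = extended ? loop_id : (loop_id << 8) | opts;
 *	mb10 = extended ? opts    : 0;	// unused in the legacy layout
 */
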
2280 static void
2281 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2282 {
2283         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2284         logio->control_flags =
2285             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2286         if (!sp->fcport->se_sess ||
2287             !sp->fcport->keep_nport_handle)
2288                 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2289         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2290         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2291         logio->port_id[1] = sp->fcport->d_id.b.area;
2292         logio->port_id[2] = sp->fcport->d_id.b.domain;
2293         logio->vp_index = sp->vha->vp_idx;
2294 }
2295
2296 static void
2297 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2298 {
2299         struct qla_hw_data *ha = sp->vha->hw;
2300
2301         mbx->entry_type = MBX_IOCB_TYPE;
2302         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2303         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2304         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2305             cpu_to_le16(sp->fcport->loop_id):
2306             cpu_to_le16(sp->fcport->loop_id << 8);
2307         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2308         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2309             sp->fcport->d_id.b.al_pa);
2310         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2311         /* Implicit: mbx->mb10 = 0. */
2312 }
2313
2314 static void
2315 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2316 {
2317         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2318         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2319         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2320         logio->vp_index = sp->vha->vp_idx;
2321 }
2322
2323 static void
2324 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2325 {
2326         struct qla_hw_data *ha = sp->vha->hw;
2327
2328         mbx->entry_type = MBX_IOCB_TYPE;
2329         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2330         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2331         if (HAS_EXTENDED_IDS(ha)) {
2332                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2333                 mbx->mb10 = cpu_to_le16(BIT_0);
2334         } else {
2335                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2336         }
2337         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2338         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2339         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2340         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2341         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2342 }
2343
2344 static void
2345 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2346 {
2347         uint32_t flags;
2348         uint64_t lun;
2349         struct fc_port *fcport = sp->fcport;
2350         scsi_qla_host_t *vha = fcport->vha;
2351         struct qla_hw_data *ha = vha->hw;
2352         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2353         struct req_que *req = vha->req;
2354
2355         flags = iocb->u.tmf.flags;
2356         lun = iocb->u.tmf.lun;
2357
2358         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2359         tsk->entry_count = 1;
2360         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2361         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2362         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2363         tsk->control_flags = cpu_to_le32(flags);
2364         tsk->port_id[0] = fcport->d_id.b.al_pa;
2365         tsk->port_id[1] = fcport->d_id.b.area;
2366         tsk->port_id[2] = fcport->d_id.b.domain;
2367         tsk->vp_index = fcport->vha->vp_idx;
2368
2369         if (flags == TCF_LUN_RESET) {
2370                 int_to_scsilun(lun, &tsk->lun);
2371                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2372                         sizeof(tsk->lun));
2373         }
2374 }
2375
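/*
 * Editor's usage sketch: this IOCB is normally reached through the async
 * task-management path, e.g. a LUN reset (hypothetical call site; flags
 * and helper as defined in qla_init.c):
 *
 *	rval = qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, lun, tag);
 *
 * Only TCF_LUN_RESET carries a LUN, which is why the LUN field is filled
 * in and FCP-swapped solely for that flag above.
 */
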
2376 static void
2377 qla2x00_els_dcmd_sp_free(void *data)
2378 {
2379         srb_t *sp = data;
2380         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2381
2382         kfree(sp->fcport);
2383
2384         if (elsio->u.els_logo.els_logo_pyld)
2385                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2386                     elsio->u.els_logo.els_logo_pyld,
2387                     elsio->u.els_logo.els_logo_pyld_dma);
2388
2389         del_timer(&elsio->timer);
2390         qla2x00_rel_sp(sp);
2391 }
2392
2393 static void
2394 qla2x00_els_dcmd_iocb_timeout(void *data)
2395 {
2396         srb_t *sp = data;
2397         fc_port_t *fcport = sp->fcport;
2398         struct scsi_qla_host *vha = sp->vha;
2399         struct srb_iocb *lio = &sp->u.iocb_cmd;
2400
2401         ql_dbg(ql_dbg_io, vha, 0x3069,
2402             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2403             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2404             fcport->d_id.b.al_pa);
2405
2406         complete(&lio->u.els_logo.comp);
2407 }
2408
2409 static void
2410 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2411 {
2412         srb_t *sp = ptr;
2413         fc_port_t *fcport = sp->fcport;
2414         struct srb_iocb *lio = &sp->u.iocb_cmd;
2415         struct scsi_qla_host *vha = sp->vha;
2416
2417         ql_dbg(ql_dbg_io, vha, 0x3072,
2418             "%s hdl=%x, portid=%02x%02x%02x done\n",
2419             sp->name, sp->handle, fcport->d_id.b.domain,
2420             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2421
2422         complete(&lio->u.els_logo.comp);
2423 }
2424
2425 int
2426 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2427     port_id_t remote_did)
2428 {
2429         srb_t *sp;
2430         fc_port_t *fcport = NULL;
2431         struct srb_iocb *elsio = NULL;
2432         struct qla_hw_data *ha = vha->hw;
2433         struct els_logo_payload logo_pyld;
2434         int rval = QLA_SUCCESS;
2435
2436         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2437         if (!fcport) {
2438                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2439                return -ENOMEM;
2440         }
2441
2442         /* Alloc SRB structure */
2443         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2444         if (!sp) {
2445                 kfree(fcport);
2446                 ql_log(ql_log_info, vha, 0x70e6,
2447                  "SRB allocation failed\n");
2448                 return -ENOMEM;
2449         }
2450
2451         elsio = &sp->u.iocb_cmd;
2452         fcport->loop_id = 0xFFFF;
2453         fcport->d_id.b.domain = remote_did.b.domain;
2454         fcport->d_id.b.area = remote_did.b.area;
2455         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2456
2457         ql_dbg(ql_dbg_io, vha, 0x3073, "Enter: LOGO portid=%02x%02x%02x\n",
2458             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2459
2460         sp->type = SRB_ELS_DCMD;
2461         sp->name = "ELS_DCMD";
2462         sp->fcport = fcport;
2463         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2464         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
        init_completion(&elsio->u.els_logo.comp);
2465         sp->done = qla2x00_els_dcmd_sp_done;
2466         sp->free = qla2x00_els_dcmd_sp_free;
2467
2468         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2469                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2470                             GFP_KERNEL);
2471
2472         if (!elsio->u.els_logo.els_logo_pyld) {
2473                 sp->free(sp);
2474                 return QLA_FUNCTION_FAILED;
2475         }
2476
2477         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2478
2479         elsio->u.els_logo.els_cmd = els_opcode;
2480         logo_pyld.opcode = els_opcode;
2481         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2482         logo_pyld.s_id[1] = vha->d_id.b.area;
2483         logo_pyld.s_id[2] = vha->d_id.b.domain;
2484         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2485         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2486
2487         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2488             sizeof(struct els_logo_payload));
2489
2490         rval = qla2x00_start_sp(sp);
2491         if (rval != QLA_SUCCESS) {
2492                 sp->free(sp);
2493                 return QLA_FUNCTION_FAILED;
2494         }
2495
2496         ql_dbg(ql_dbg_io, vha, 0x3074,
2497             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2498             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2499             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2500
2501         wait_for_completion(&elsio->u.els_logo.comp);
2502
2503         sp->free(sp);
2504         return rval;
2505 }
2506
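/*
 * Editor's usage sketch: an explicit LOGO to a remote port id can be
 * issued synchronously with (assuming ELS_DCMD_LOGO from qla_def.h):
 *
 *	port_id_t did = { ... };
 *
 *	qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
 *
 * The call sleeps on els_logo.comp until the IOCB completes or times
 * out, so it must not be used in atomic context.
 */
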
2507 static void
2508 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2509 {
2510         scsi_qla_host_t *vha = sp->vha;
2511         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2512         uint32_t        dsd_len = 24;
2513
2514         els_iocb->entry_type = ELS_IOCB_TYPE;
2515         els_iocb->entry_count = 1;
2516         els_iocb->sys_define = 0;
2517         els_iocb->entry_status = 0;
2518         els_iocb->handle = sp->handle;
2519         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2520         els_iocb->tx_dsd_count = cpu_to_le16(1);
2521         els_iocb->vp_index = vha->vp_idx;
2522         els_iocb->sof_type = EST_SOFI3;
2523         els_iocb->rx_dsd_count = 0;
2524         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2525
2526         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2527         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2528         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2529         els_iocb->s_id[0] = vha->d_id.b.al_pa;
2530         els_iocb->s_id[1] = vha->d_id.b.area;
2531         els_iocb->s_id[2] = vha->d_id.b.domain;
2532         els_iocb->control_flags = 0;
2533
2534         if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2535                 els_iocb->tx_byte_count = cpu_to_le32(sizeof(struct els_plogi_payload));
2536                 els_iocb->tx_address[0] =
2537                         cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2538                 els_iocb->tx_address[1] =
2539                         cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2540                 els_iocb->tx_len = cpu_to_le32(dsd_len);
2541
2542                 els_iocb->rx_dsd_count = cpu_to_le16(1);
2543                 els_iocb->rx_byte_count = cpu_to_le32(sizeof(struct els_plogi_payload));
2544                 els_iocb->rx_address[0] =
2545                         cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2546                 els_iocb->rx_address[1] =
2547                         cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2548                 els_iocb->rx_len = cpu_to_le32(dsd_len);
2549                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2550                     "PLOGI ELS IOCB:\n");
2551                 ql_dump_buffer(ql_log_info, vha, 0x0109,
2552                     (uint8_t *)els_iocb, 0x70);
2553         } else {
2554                 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2555                 els_iocb->tx_address[0] =
2556                     cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2557                 els_iocb->tx_address[1] =
2558                     cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2559                 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2560
2561                 els_iocb->rx_byte_count = 0;
2562                 els_iocb->rx_address[0] = 0;
2563                 els_iocb->rx_address[1] = 0;
2564                 els_iocb->rx_len = 0;
2565         }
2566
2567         sp->vha->qla_stats.control_requests++;
2568 }
2569
2570 static void
2571 qla2x00_els_dcmd2_sp_free(void *data)
2572 {
2573         srb_t *sp = data;
2574         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2575
2576         if (elsio->u.els_plogi.els_plogi_pyld)
2577                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2578                     elsio->u.els_plogi.els_plogi_pyld,
2579                     elsio->u.els_plogi.els_plogi_pyld_dma);
2580
2581         if (elsio->u.els_plogi.els_resp_pyld)
2582                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2583                     elsio->u.els_plogi.els_resp_pyld,
2584                     elsio->u.els_plogi.els_resp_pyld_dma);
2585
2586         del_timer(&elsio->timer);
2587         qla2x00_rel_sp(sp);
2588 }
2589
2590 static void
2591 qla2x00_els_dcmd2_iocb_timeout(void *data)
2592 {
2593         srb_t *sp = data;
2594         fc_port_t *fcport = sp->fcport;
2595         struct scsi_qla_host *vha = sp->vha;
2596         struct qla_hw_data *ha = vha->hw;
2597         struct srb_iocb *lio = &sp->u.iocb_cmd;
2598         unsigned long flags = 0;
2599         int res;
2600
2601         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2602             "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2603             sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2604
2605         /* Abort the exchange */
2606         spin_lock_irqsave(&ha->hardware_lock, flags);
2607         res = ha->isp_ops->abort_command(sp);
2608         ql_dbg(ql_dbg_io, vha, 0x3070,
2609             "mbx abort_command %s\n",
2610             (res == QLA_SUCCESS) ? "successful" : "failed");
2611         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2612
2613         complete(&lio->u.els_plogi.comp);
2614 }
2615
2616 static void
2617 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2618 {
2619         srb_t *sp = ptr;
2620         fc_port_t *fcport = sp->fcport;
2621         struct srb_iocb *lio = &sp->u.iocb_cmd;
2622         struct scsi_qla_host *vha = sp->vha;
2623
2624         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
2625             "%s ELS hdl=%x, portid=%06x done %8phC\n",
2626             sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
2627
2628         complete(&lio->u.els_plogi.comp);
2629 }
2630
2631 int
2632 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2633                        fc_port_t *fcport, port_id_t remote_did)
2634 {
2635         srb_t *sp;
2636         struct srb_iocb *elsio = NULL;
2637         struct qla_hw_data *ha = vha->hw;
2638         int rval = QLA_SUCCESS;
2639         void    *ptr, *resp_ptr;
2640         dma_addr_t ptr_dma;
2641
2642         /* Alloc SRB structure */
2643         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2644         if (!sp) {
2645                 ql_log(ql_log_info, vha, 0x70e6,
2646                  "SRB allocation failed\n");
2647                 return -ENOMEM;
2648         }
2649
2650         elsio = &sp->u.iocb_cmd;
2651         fcport->d_id.b.domain = remote_did.b.domain;
2652         fcport->d_id.b.area = remote_did.b.area;
2653         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2654
2655         ql_dbg(ql_dbg_io, vha, 0x3073,
2656             "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2657
2658         sp->type = SRB_ELS_DCMD;
2659         sp->name = "ELS_DCMD";
2660         sp->fcport = fcport;
2661
2662         elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2663         init_completion(&elsio->u.els_plogi.comp);
2664         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2665
2666         sp->done = qla2x00_els_dcmd2_sp_done;
2667         sp->free = qla2x00_els_dcmd2_sp_free;
2668
2669         ptr = elsio->u.els_plogi.els_plogi_pyld =
2670             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2671                 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2672         ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
2673
2674         if (!elsio->u.els_plogi.els_plogi_pyld) {
2675                 rval = QLA_FUNCTION_FAILED;
2676                 goto out;
2677         }
2678
2679         resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2680             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2681                 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2682
2683         if (!elsio->u.els_plogi.els_resp_pyld) {
2684                 rval = QLA_FUNCTION_FAILED;
2685                 goto out;
2686         }
2687
2688         ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2689
2690         memset(ptr, 0, sizeof(struct els_plogi_payload));
2691         memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2692         elsio->u.els_plogi.els_cmd = els_opcode;
2693         elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2694         qla24xx_get_port_login_templ(vha, ptr_dma + 4,
2695                 &elsio->u.els_plogi.els_plogi_pyld->data[0],
2696                 sizeof(struct els_plogi_payload));
2697
2698         ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2699         ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
2700             (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2701
2702         rval = qla2x00_start_sp(sp);
2703         if (rval != QLA_SUCCESS) {
2704                 rval = QLA_FUNCTION_FAILED;
2705                 goto out;
2706         }
2707
2708         ql_dbg(ql_dbg_io, vha, 0x3074,
2709             "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
2710             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
2711
2712         wait_for_completion(&elsio->u.els_plogi.comp);
2713
2714         if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2715                 rval = QLA_FUNCTION_FAILED;
2716
2717 out:
2718         sp->free(sp);
2719         return rval;
2720 }
2721
2722 static void
2723 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2724 {
2725         struct bsg_job *bsg_job = sp->u.bsg_job;
2726         struct fc_bsg_request *bsg_request = bsg_job->request;
2727
2728         els_iocb->entry_type = ELS_IOCB_TYPE;
2729         els_iocb->entry_count = 1;
2730         els_iocb->sys_define = 0;
2731         els_iocb->entry_status = 0;
2732         els_iocb->handle = sp->handle;
2733         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2734         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2735         els_iocb->vp_index = sp->vha->vp_idx;
2736         els_iocb->sof_type = EST_SOFI3;
2737         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2738
2739         els_iocb->opcode =
2740             sp->type == SRB_ELS_CMD_RPT ?
2741             bsg_request->rqst_data.r_els.els_code :
2742             bsg_request->rqst_data.h_els.command_code;
2743         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2744         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2745         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2746         els_iocb->control_flags = 0;
2747         els_iocb->rx_byte_count =
2748             cpu_to_le32(bsg_job->reply_payload.payload_len);
2749         els_iocb->tx_byte_count =
2750             cpu_to_le32(bsg_job->request_payload.payload_len);
2751
2752         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2753             (bsg_job->request_payload.sg_list)));
2754         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2755             (bsg_job->request_payload.sg_list)));
2756         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2757             (bsg_job->request_payload.sg_list));
2758
2759         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2760             (bsg_job->reply_payload.sg_list)));
2761         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2762             (bsg_job->reply_payload.sg_list)));
2763         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2764             (bsg_job->reply_payload.sg_list));
2765
2766         sp->vha->qla_stats.control_requests++;
2767 }
2768
2769 static void
2770 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2771 {
2772         uint16_t        avail_dsds;
2773         uint32_t        *cur_dsd;
2774         struct scatterlist *sg;
2775         int index;
2776         uint16_t tot_dsds;
2777         scsi_qla_host_t *vha = sp->vha;
2778         struct qla_hw_data *ha = vha->hw;
2779         struct bsg_job *bsg_job = sp->u.bsg_job;
2780         int loop_iteration = 0;
2781         int entry_count = 1;
2782
2783         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2784         ct_iocb->entry_type = CT_IOCB_TYPE;
2785         ct_iocb->entry_status = 0;
2786         ct_iocb->handle1 = sp->handle;
2787         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2788         ct_iocb->status = cpu_to_le16(0);
2789         ct_iocb->control_flags = cpu_to_le16(0);
2790         ct_iocb->timeout = 0;
2791         ct_iocb->cmd_dsd_count =
2792             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2793         ct_iocb->total_dsd_count =
2794             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2795         ct_iocb->req_bytecount =
2796             cpu_to_le32(bsg_job->request_payload.payload_len);
2797         ct_iocb->rsp_bytecount =
2798             cpu_to_le32(bsg_job->reply_payload.payload_len);
2799
2800         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2801             (bsg_job->request_payload.sg_list)));
2802         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2803             (bsg_job->request_payload.sg_list)));
2804         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2805
2806         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2807             (bsg_job->reply_payload.sg_list)));
2808         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2809             (bsg_job->reply_payload.sg_list)));
2810         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2811
2812         avail_dsds = 1;
2813         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2814         index = 0;
2815         tot_dsds = bsg_job->reply_payload.sg_cnt;
2816
2817         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2818                 dma_addr_t       sle_dma;
2819                 cont_a64_entry_t *cont_pkt;
2820
2821                 /* Allocate additional continuation packets? */
2822                 if (avail_dsds == 0) {
2823                         /*
2824                          * Five DSDs are available in the Cont.
2825                          * Type 1 IOCB.
2826                          */
2827                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2828                             vha->hw->req_q_map[0]);
2829                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2830                         avail_dsds = 5;
2831                         entry_count++;
2832                 }
2833
2834                 sle_dma = sg_dma_address(sg);
2835                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2836                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2837                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2838                 loop_iteration++;
2839                 avail_dsds--;
2840         }
2841         ct_iocb->entry_count = entry_count;
2842
2843         sp->vha->qla_stats.control_requests++;
2844 }
2845
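/*
 * Editor's sketch of the entry accounting above (illustrative only): the
 * base CT IOCB embeds one response DSD and each Continuation Type 1 IOCB
 * carries five more, so:
 *
 *	static uint16_t ct_entry_count(uint16_t rsp_dsds)
 *	{
 *		if (rsp_dsds <= 1)
 *			return 1;
 *		return 1 + (rsp_dsds - 1 + 4) / 5;	/* 1 + ceil((n-1)/5) */
 *	}
 */
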
2846 static void
2847 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2848 {
2849         uint16_t        avail_dsds;
2850         uint32_t        *cur_dsd;
2851         struct scatterlist *sg;
2852         int index;
2853         uint16_t cmd_dsds, rsp_dsds;
2854         scsi_qla_host_t *vha = sp->vha;
2855         struct qla_hw_data *ha = vha->hw;
2856         struct bsg_job *bsg_job = sp->u.bsg_job;
2857         int entry_count = 1;
2858         cont_a64_entry_t *cont_pkt = NULL;
2859
2860         ct_iocb->entry_type = CT_IOCB_TYPE;
2861         ct_iocb->entry_status = 0;
2862         ct_iocb->sys_define = 0;
2863         ct_iocb->handle = sp->handle;
2864
2865         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2866         ct_iocb->vp_index = sp->vha->vp_idx;
2867         ct_iocb->comp_status = cpu_to_le16(0);
2868
2869         cmd_dsds = bsg_job->request_payload.sg_cnt;
2870         rsp_dsds = bsg_job->reply_payload.sg_cnt;
2871
2872         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2873         ct_iocb->timeout = 0;
2874         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2875         ct_iocb->cmd_byte_count =
2876             cpu_to_le32(bsg_job->request_payload.payload_len);
2877
2878         avail_dsds = 2;
2879         cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
2880         index = 0;
2881
2882         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2883                 dma_addr_t       sle_dma;
2884
2885                 /* Allocate additional continuation packets? */
2886                 if (avail_dsds == 0) {
2887                         /*
2888                          * Five DSDs are available in the Cont.
2889                          * Type 1 IOCB.
2890                          */
2891                         cont_pkt = qla2x00_prep_cont_type1_iocb(
2892                             vha, ha->req_q_map[0]);
2893                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2894                         avail_dsds = 5;
2895                         entry_count++;
2896                 }
2897
2898                 sle_dma = sg_dma_address(sg);
2899                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2900                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2901                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2902                 avail_dsds--;
2903         }
2904
2905         index = 0;
2906
2907         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2908                 dma_addr_t       sle_dma;
2909
2910                 /* Allocate additional continuation packets? */
2911                 if (avail_dsds == 0) {
2912                         /*
2913                          * Five DSDs are available in the Cont.
2914                          * Type 1 IOCB.
2915                          */
2916                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2917                             ha->req_q_map[0]);
2918                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2919                         avail_dsds = 5;
2920                         entry_count++;
2921                 }
2922
2923                 sle_dma = sg_dma_address(sg);
2924                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2925                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2926                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2927                 avail_dsds--;
2928         }
2929         ct_iocb->entry_count = entry_count;
2930 }
2931
2932 /**
2933  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2934  * @sp: command to send to the ISP
2935  *
2936  * Returns non-zero if a failure occurred, else zero.
2937  */
2938 int
2939 qla82xx_start_scsi(srb_t *sp)
2940 {
2941         int             nseg;
2942         unsigned long   flags;
2943         struct scsi_cmnd *cmd;
2944         uint32_t        *clr_ptr;
2945         uint32_t        index;
2946         uint32_t        handle;
2947         uint16_t        cnt;
2948         uint16_t        req_cnt;
2949         uint16_t        tot_dsds;
2950         struct device_reg_82xx __iomem *reg;
2951         uint32_t dbval;
2952         uint32_t *fcp_dl;
2953         uint8_t additional_cdb_len;
2954         struct ct6_dsd *ctx;
2955         struct scsi_qla_host *vha = sp->vha;
2956         struct qla_hw_data *ha = vha->hw;
2957         struct req_que *req = NULL;
2958         struct rsp_que *rsp = NULL;
2959
2960         /* Setup device pointers. */
2961         reg = &ha->iobase->isp82;
2962         cmd = GET_CMD_SP(sp);
2963         req = vha->req;
2964         rsp = ha->rsp_q_map[0];
2965
2966         /* So we know we haven't pci_map'ed anything yet */
2967         tot_dsds = 0;
2968
2969         dbval = 0x04 | (ha->portnum << 5);
2970
2971         /* Send marker if required */
2972         if (vha->marker_needed != 0) {
2973                 if (qla2x00_marker(vha, req,
2974                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2975                         ql_log(ql_log_warn, vha, 0x300c,
2976                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2977                         return QLA_FUNCTION_FAILED;
2978                 }
2979                 vha->marker_needed = 0;
2980         }
2981
2982         /* Acquire ring specific lock */
2983         spin_lock_irqsave(&ha->hardware_lock, flags);
2984
2985         /* Check for room in outstanding command list. */
2986         handle = req->current_outstanding_cmd;
2987         for (index = 1; index < req->num_outstanding_cmds; index++) {
2988                 handle++;
2989                 if (handle == req->num_outstanding_cmds)
2990                         handle = 1;
2991                 if (!req->outstanding_cmds[handle])
2992                         break;
2993         }
2994         if (index == req->num_outstanding_cmds)
2995                 goto queuing_error;
2996
2997         /* Map the sg table so we have an accurate count of sg entries needed */
2998         if (scsi_sg_count(cmd)) {
2999                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3000                     scsi_sg_count(cmd), cmd->sc_data_direction);
3001                 if (unlikely(!nseg))
3002                         goto queuing_error;
3003         } else
3004                 nseg = 0;
3005
3006         tot_dsds = nseg;
3007
3008         if (tot_dsds > ql2xshiftctondsd) {
3009                 struct cmd_type_6 *cmd_pkt;
3010                 uint16_t more_dsd_lists = 0;
3011                 struct dsd_dma *dsd_ptr;
3012                 uint16_t i;
3013
3014                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3015                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3016                         ql_dbg(ql_dbg_io, vha, 0x300d,
3017                             "Num of DSD list %d exceeds limit %d for cmd=%p.\n",
3018                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3019                             cmd);
3020                         goto queuing_error;
3021                 }
3022
3023                 if (more_dsd_lists <= ha->gbl_dsd_avail)
3024                         goto sufficient_dsds;
3025                 else
3026                         more_dsd_lists -= ha->gbl_dsd_avail;
3027
3028                 for (i = 0; i < more_dsd_lists; i++) {
3029                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3030                         if (!dsd_ptr) {
3031                                 ql_log(ql_log_fatal, vha, 0x300e,
3032                                     "Failed to allocate memory for dsd_dma "
3033                                     "for cmd=%p.\n", cmd);
3034                                 goto queuing_error;
3035                         }
3036
3037                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3038                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3039                         if (!dsd_ptr->dsd_addr) {
3040                                 kfree(dsd_ptr);
3041                                 ql_log(ql_log_fatal, vha, 0x300f,
3042                                     "Failed to allocate memory for dsd_addr "
3043                                     "for cmd=%p.\n", cmd);
3044                                 goto queuing_error;
3045                         }
3046                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3047                         ha->gbl_dsd_avail++;
3048                 }
3049
3050 sufficient_dsds:
3051                 req_cnt = 1;
3052
3053                 if (req->cnt < (req_cnt + 2)) {
3054                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3055                                 &reg->req_q_out[0]);
3056                         if (req->ring_index < cnt)
3057                                 req->cnt = cnt - req->ring_index;
3058                         else
3059                                 req->cnt = req->length -
3060                                         (req->ring_index - cnt);
3061                         if (req->cnt < (req_cnt + 2))
3062                                 goto queuing_error;
3063                 }
3064
3065                 ctx = sp->u.scmd.ctx =
3066                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3067                 if (!ctx) {
3068                         ql_log(ql_log_fatal, vha, 0x3010,
3069                             "Failed to allocate ctx for cmd=%p.\n", cmd);
3070                         goto queuing_error;
3071                 }
3072
3073                 memset(ctx, 0, sizeof(struct ct6_dsd));
3074                 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3075                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3076                 if (!ctx->fcp_cmnd) {
3077                         ql_log(ql_log_fatal, vha, 0x3011,
3078                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3079                         goto queuing_error;
3080                 }
3081
3082                 /* Initialize the DSD list and dma handle */
3083                 INIT_LIST_HEAD(&ctx->dsd_list);
3084                 ctx->dsd_use_cnt = 0;
3085
3086                 if (cmd->cmd_len > 16) {
3087                         additional_cdb_len = cmd->cmd_len - 16;
3088                         if ((cmd->cmd_len % 4) != 0) {
3089                                 /* SCSI commands longer than 16 bytes
3090                                  * must be a multiple of 4 bytes
3091                                  */
3092                                 ql_log(ql_log_warn, vha, 0x3012,
3093                                     "scsi cmd len %d not multiple of 4 "
3094                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3095                                 goto queuing_error_fcp_cmnd;
3096                         }
3097                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3098                 } else {
3099                         additional_cdb_len = 0;
3100                         ctx->fcp_cmnd_len = 12 + 16 + 4;
3101                 }
3102
3103                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3104                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3105
3106                 /* Zero out remaining portion of packet. */
3107                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3108                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3109                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3110                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3111
3112                 /* Set NPORT-ID and LUN number*/
3113                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3114                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3115                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3116                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3117                 cmd_pkt->vp_index = sp->vha->vp_idx;
3118
3119                 /* Build IOCB segments */
3120                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3121                         goto queuing_error_fcp_cmnd;
3122
3123                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3124                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3125
3126                 /* build FCP_CMND IU */
3127                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3128                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3129
3130                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3131                         ctx->fcp_cmnd->additional_cdb_len |= 1;
3132                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3133                         ctx->fcp_cmnd->additional_cdb_len |= 2;
3134
3135                 /* Populate the FCP_PRIO. */
3136                 if (ha->flags.fcp_prio_enabled)
3137                         ctx->fcp_cmnd->task_attribute |=
3138                             sp->fcport->fcp_prio << 3;
3139
3140                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3141
3142                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3143                     additional_cdb_len);
3144                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3145
3146                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3147                 cmd_pkt->fcp_cmnd_dseg_address[0] =
3148                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3149                 cmd_pkt->fcp_cmnd_dseg_address[1] =
3150                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3151
3152                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3153                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3154                 /* Set total data segment count. */
3155                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3156                 /* Specify response queue number where
3157                  * completion should happen.
3158                  */
3159                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3160         } else {
3161                 struct cmd_type_7 *cmd_pkt;
3162                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3163                 if (req->cnt < (req_cnt + 2)) {
3164                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3165                             &reg->req_q_out[0]);
3166                         if (req->ring_index < cnt)
3167                                 req->cnt = cnt - req->ring_index;
3168                         else
3169                                 req->cnt = req->length -
3170                                         (req->ring_index - cnt);
3171                 }
3172                 if (req->cnt < (req_cnt + 2))
3173                         goto queuing_error;
3174
3175                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3176                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3177
3178                 /* Zero out remaining portion of packet. */
3179                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3180                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3181                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3182                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3183
3184                 /* Set NPORT-ID and LUN number*/
3185                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3186                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3187                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3188                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3189                 cmd_pkt->vp_index = sp->vha->vp_idx;
3190
3191                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3192                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3193                     sizeof(cmd_pkt->lun));
3194
3195                 /* Populate the FCP_PRIO. */
3196                 if (ha->flags.fcp_prio_enabled)
3197                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3198
3199                 /* Load SCSI command packet. */
3200                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3201                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3202
3203                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3204
3205                 /* Build IOCB segments */
3206                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3207
3208                 /* Set total data segment count. */
3209                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3210                 /* Specify response queue number where
3211                  * completion should happen.
3212                  */
3213                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3214
3215         }
3216         /* Build command packet. */
3217         req->current_outstanding_cmd = handle;
3218         req->outstanding_cmds[handle] = sp;
3219         sp->handle = handle;
3220         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3221         req->cnt -= req_cnt;
3222         wmb();
3223
3224         /* Adjust ring index. */
3225         req->ring_index++;
3226         if (req->ring_index == req->length) {
3227                 req->ring_index = 0;
3228                 req->ring_ptr = req->ring;
3229         } else
3230                 req->ring_ptr++;
3231
3232         sp->flags |= SRB_DMA_VALID;
3233
3234         /* Set chip new ring index. */
3235         /* write, read and verify logic */
3236         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3237         if (ql2xdbwr)
3238                 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3239         else {
3240                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3241                 wmb();
3242                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3243                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3244                         wmb();
3245                 }
3246         }
3247
3248         /* Manage unprocessed RIO/ZIO commands in response queue. */
3249         if (vha->flags.process_response_queue &&
3250             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3251                 qla24xx_process_response_queue(vha, rsp);
3252
3253         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3254         return QLA_SUCCESS;
3255
3256 queuing_error_fcp_cmnd:
3257         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3258 queuing_error:
3259         if (tot_dsds)
3260                 scsi_dma_unmap(cmd);
3261
3262         if (sp->u.scmd.ctx) {
3263                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3264                 sp->u.scmd.ctx = NULL;
3265         }
3266         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3267
3268         return QLA_FUNCTION_FAILED;
3269 }
3270
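/**
 * qla24xx_abort_iocb() - Build an Abort IOCB for an outstanding command.
 * @sp: SRB carrying the abort request
 * @abt_iocb: abort entry to populate with the handle to abort
 */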
3271 static void
3272 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3273 {
3274         struct srb_iocb *aio = &sp->u.iocb_cmd;
3275         scsi_qla_host_t *vha = sp->vha;
3276         struct req_que *req = vha->req;
3277
3278         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3279         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3280         abt_iocb->entry_count = 1;
3281         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3282         abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3283         abt_iocb->handle_to_abort =
3284             cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3285                                     aio->u.abt.cmd_hndl));
3286         abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3287         abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3288         abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3289         abt_iocb->vp_index = vha->vp_idx;
3290         abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3291         /* Ensure the IOCB is fully written before it is handed to the firmware */
3292         wmb();
3293 }
3294
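/**
 * qla2x00_mb_iocb() - Build a mailbox-command IOCB.
 * @sp: SRB command to process
 * @mbx: MBX entry to populate from the outbound mailbox values
 */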
3295 static void
3296 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3297 {
3298         int i, sz;
3299
3300         mbx->entry_type = MBX_IOCB_TYPE;
3301         mbx->handle = sp->handle;
3302         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3303
3304         for (i = 0; i < sz; i++)
3305                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3306 }
3307
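/**
 * qla2x00_ctpthru_cmd_iocb() - Prepare a CT pass-through command IOCB.
 * @sp: SRB command to process
 * @ct_pkt: CT entry prepared via qla24xx_prep_ms_iocb()
 */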
3308 static void
3309 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3310 {
3311         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3312         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3313         ct_pkt->handle = sp->handle;
3314 }
3315
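/**
 * qla2x00_send_notify_ack_iocb() - Build a Notify Acknowledge IOCB from a
 * firmware immediate notification.
 * @sp: SRB command to process
 * @nack: notify-ack entry to populate
 */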
3316 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3317         struct nack_to_isp *nack)
3318 {
3319         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3320
3321         nack->entry_type = NOTIFY_ACK_TYPE;
3322         nack->entry_count = 1;
3323         nack->ox_id = ntfy->ox_id;
3324
3325         nack->u.isp24.handle = sp->handle;
3326         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3327         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3328                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3329                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3330         }
3331         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3332         nack->u.isp24.status = ntfy->u.isp24.status;
3333         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3334         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3335         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3336         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3337         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3338         nack->u.isp24.srr_flags = 0;
3339         nack->u.isp24.srr_reject_code = 0;
3340         nack->u.isp24.srr_reject_code_expl = 0;
3341         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3342 }
3343
3344 /*
3345  * Build an NVMe Link Service (PT-LS4) request IOCB.
3346  */
3347 static int
3348 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3349 {
3350         struct srb_iocb *nvme;
3351         int     rval = QLA_SUCCESS;
3352
3353         nvme = &sp->u.iocb_cmd;
3354         cmd_pkt->entry_type = PT_LS4_REQUEST;
3355         cmd_pkt->entry_count = 1;
3356         cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3357
3358         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3359         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3360         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3361
3362         cmd_pkt->tx_dseg_count = 1;
3363         cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3364         cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3365         cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3366         cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3367
3368         cmd_pkt->rx_dseg_count = 1;
3369         cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3370         cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
3371         cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3372         cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3373
3374         return rval;
3375 }
3376
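/**
 * qla25xx_ctrlvp_iocb() - Build a VP control IOCB.
 * @sp: SRB command to process
 * @vce: VP control entry to populate; the target VP is selected by setting
 *       its bit in vp_idx_map[]
 */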
3377 static void
3378 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3379 {
3380         int map, pos;
3381
3382         vce->entry_type = VP_CTRL_IOCB_TYPE;
3383         vce->handle = sp->handle;
3384         vce->entry_count = 1;
3385         vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3386         vce->vp_count = cpu_to_le16(1);
3387
3388         /*
3389          * The index map in firmware starts at 1, so decrement the
3390          * index; this is OK as we never use index 0.
3391          */
3392         map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3393         pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3394         vce->vp_idx_map[map] |= 1 << pos;
3395 }
3396
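/**
 * qla24xx_prlo_iocb() - Build a PRLO (process logout) IOCB.
 * @sp: SRB command to process
 * @logio: login/logout entry to populate
 */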
3397 static void
3398 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3399 {
3400         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3401         logio->control_flags =
3402             cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3403
3404         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3405         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3406         logio->port_id[1] = sp->fcport->d_id.b.area;
3407         logio->port_id[2] = sp->fcport->d_id.b.domain;
3408         logio->vp_index = sp->fcport->vha->vp_idx;
3409 }
3410
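/**
 * qla2x00_start_sp() - Allocate an IOCB, build it for the SRB type and
 * notify the firmware.
 * @sp: SRB command to process
 *
 * Acquires and releases ha->hardware_lock internally. Returns QLA_SUCCESS,
 * or QLA_FUNCTION_FAILED if no IOCB space is available.
 */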
3411 int
3412 qla2x00_start_sp(srb_t *sp)
3413 {
3414         int rval;
3415         scsi_qla_host_t *vha = sp->vha;
3416         struct qla_hw_data *ha = vha->hw;
3417         void *pkt;
3418         unsigned long flags;
3419
3420         rval = QLA_FUNCTION_FAILED;
3421         spin_lock_irqsave(&ha->hardware_lock, flags);
3422         pkt = qla2x00_alloc_iocbs(vha, sp);
3423         if (!pkt) {
3424                 ql_log(ql_log_warn, vha, 0x700c,
3425                     "qla2x00_alloc_iocbs failed.\n");
3426                 goto done;
3427         }
3428
3429         rval = QLA_SUCCESS;
3430         switch (sp->type) {
3431         case SRB_LOGIN_CMD:
3432                 IS_FWI2_CAPABLE(ha) ?
3433                     qla24xx_login_iocb(sp, pkt) :
3434                     qla2x00_login_iocb(sp, pkt);
3435                 break;
3436         case SRB_PRLI_CMD:
3437                 qla24xx_prli_iocb(sp, pkt);
3438                 break;
3439         case SRB_LOGOUT_CMD:
3440                 IS_FWI2_CAPABLE(ha) ?
3441                     qla24xx_logout_iocb(sp, pkt) :
3442                     qla2x00_logout_iocb(sp, pkt);
3443                 break;
3444         case SRB_ELS_CMD_RPT:
3445         case SRB_ELS_CMD_HST:
3446                 qla24xx_els_iocb(sp, pkt);
3447                 break;
3448         case SRB_CT_CMD:
3449                 IS_FWI2_CAPABLE(ha) ?
3450                     qla24xx_ct_iocb(sp, pkt) :
3451                     qla2x00_ct_iocb(sp, pkt);
3452                 break;
3453         case SRB_ADISC_CMD:
3454                 IS_FWI2_CAPABLE(ha) ?
3455                     qla24xx_adisc_iocb(sp, pkt) :
3456                     qla2x00_adisc_iocb(sp, pkt);
3457                 break;
3458         case SRB_TM_CMD:
3459                 IS_QLAFX00(ha) ?
3460                     qlafx00_tm_iocb(sp, pkt) :
3461                     qla24xx_tm_iocb(sp, pkt);
3462                 break;
3463         case SRB_FXIOCB_DCMD:
3464         case SRB_FXIOCB_BCMD:
3465                 qlafx00_fxdisc_iocb(sp, pkt);
3466                 break;
3467         case SRB_NVME_LS:
3468                 qla_nvme_ls(sp, pkt);
3469                 break;
3470         case SRB_ABT_CMD:
3471                 IS_QLAFX00(ha) ?
3472                         qlafx00_abort_iocb(sp, pkt) :
3473                         qla24xx_abort_iocb(sp, pkt);
3474                 break;
3475         case SRB_ELS_DCMD:
3476                 qla24xx_els_logo_iocb(sp, pkt);
3477                 break;
3478         case SRB_CT_PTHRU_CMD:
3479                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3480                 break;
3481         case SRB_MB_IOCB:
3482                 qla2x00_mb_iocb(sp, pkt);
3483                 break;
3484         case SRB_NACK_PLOGI:
3485         case SRB_NACK_PRLI:
3486         case SRB_NACK_LOGO:
3487                 qla2x00_send_notify_ack_iocb(sp, pkt);
3488                 break;
3489         case SRB_CTRL_VP:
3490                 qla25xx_ctrlvp_iocb(sp, pkt);
3491                 break;
3492         case SRB_PRLO_CMD:
3493                 qla24xx_prlo_iocb(sp, pkt);
3494                 break;
3495         default:
3496                 break;
3497         }
3498
3499         wmb();
3500         qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3501 done:
3502         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3503         return rval;
3504 }
3505
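/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB.
 * @sp: SRB command to process
 * @vha: HA context
 * @cmd_pkt: bidirectional command packet to populate
 * @tot_dsds: total number of data segment descriptors
 */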
3506 static void
3507 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3508                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3509 {
3510         uint16_t avail_dsds;
3511         uint32_t *cur_dsd;
3512         uint32_t req_data_len = 0;
3513         uint32_t rsp_data_len = 0;
3514         struct scatterlist *sg;
3515         int index;
3516         int entry_count = 1;
3517         struct bsg_job *bsg_job = sp->u.bsg_job;
3518
3519         /* Update entry type to indicate a bidirectional command */
3520         *((uint32_t *)(&cmd_pkt->entry_type)) =
3521                 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3522
3523         /* Set the transfer direction; for a bidirectional command both
3524          * flags are set. Also set the BD_WRAP_BACK flag; the firmware
3525          * takes care of assigning DID=SID for outgoing packets.
3526          */
3527         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3528         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3529         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3530                                                         BD_WRAP_BACK);
3531
3532         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3533         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3534         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3535         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3536
3537         vha->bidi_stats.transfer_bytes += req_data_len;
3538         vha->bidi_stats.io_count++;
3539
3540         vha->qla_stats.output_bytes += req_data_len;
3541         vha->qla_stats.output_requests++;
3542
3543         /* Only one DSD is available in the bidirectional IOCB; the
3544          * remaining DSDs are bundled in continuation IOCBs.
3545          */
3546         avail_dsds = 1;
3547         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3548
3549         index = 0;
3550
3551         for_each_sg(bsg_job->request_payload.sg_list, sg,
3552                                 bsg_job->request_payload.sg_cnt, index) {
3553                 dma_addr_t sle_dma;
3554                 cont_a64_entry_t *cont_pkt;
3555
3556                 /* Allocate additional continuation packets */
3557                 if (avail_dsds == 0) {
3558                         /* A Continuation Type 1 IOCB can accommodate
3559                          * 5 DSDs
3560                          */
3561                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3562                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3563                         avail_dsds = 5;
3564                         entry_count++;
3565                 }
3566                 sle_dma = sg_dma_address(sg);
3567                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3568                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3569                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3570                 avail_dsds--;
3571         }
3572         /* DSDs for the read request always go to a continuation IOCB
3573          * and follow the write DSDs. If there is room in the current
3574          * IOCB it is used; otherwise a new continuation IOCB is
3575          * allocated.
3576          */
3577         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3578                                 bsg_job->reply_payload.sg_cnt, index) {
3579                 dma_addr_t sle_dma;
3580                 cont_a64_entry_t *cont_pkt;
3581
3582                 /* Allocate additional continuation packets */
3583                 if (avail_dsds == 0) {
3584                         /* A Continuation Type 1 IOCB can accommodate
3585                          * 5 DSDs
3586                          */
3587                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3588                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3589                         avail_dsds = 5;
3590                         entry_count++;
3591                 }
3592                 sle_dma = sg_dma_address(sg);
3593                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3594                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3595                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3596                 avail_dsds--;
3597         }
3598         /* This should equal the number of IOCBs required for this cmd */
3599         cmd_pkt->entry_count = entry_count;
3600 }
3601
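/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the ISP.
 * @sp: command to send to the ISP
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, else an EXT_STATUS_* error code.
 */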
3602 int
3603 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3604 {
3606         struct qla_hw_data *ha = vha->hw;
3607         unsigned long flags;
3608         uint32_t handle;
3609         uint32_t index;
3610         uint16_t req_cnt;
3611         uint16_t cnt;
3612         uint32_t *clr_ptr;
3613         struct cmd_bidir *cmd_pkt = NULL;
3614         struct rsp_que *rsp;
3615         struct req_que *req;
3616         int rval = EXT_STATUS_OK;
3617
3620         rsp = ha->rsp_q_map[0];
3621         req = vha->req;
3622
3623         /* Send marker if required */
3624         if (vha->marker_needed != 0) {
3625                 if (qla2x00_marker(vha, req,
3626                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3627                         return EXT_STATUS_MAILBOX;
3628                 vha->marker_needed = 0;
3629         }
3630
3631         /* Acquire ring specific lock */
3632         spin_lock_irqsave(&ha->hardware_lock, flags);
3633
3634         /* Check for room in outstanding command list. */
3635         handle = req->current_outstanding_cmd;
3636         for (index = 1; index < req->num_outstanding_cmds; index++) {
3637                 handle++;
3638                 if (handle == req->num_outstanding_cmds)
3639                         handle = 1;
3640                 if (!req->outstanding_cmds[handle])
3641                         break;
3642         }
3643
3644         if (index == req->num_outstanding_cmds) {
3645                 rval = EXT_STATUS_BUSY;
3646                 goto queuing_error;
3647         }
3648
3649         /* Calculate number of IOCB required */
3650         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3651
3652         /* Check for room on request queue. */
3653         if (req->cnt < req_cnt + 2) {
3654                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3655                     RD_REG_DWORD_RELAXED(req->req_q_out);
3656                 if  (req->ring_index < cnt)
3657                         req->cnt = cnt - req->ring_index;
3658                 else
3659                         req->cnt = req->length -
3660                                 (req->ring_index - cnt);
3661         }
3662         if (req->cnt < req_cnt + 2) {
3663                 rval = EXT_STATUS_BUSY;
3664                 goto queuing_error;
3665         }
3666
3667         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3668         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3669
3670         /* Zero out remaining portion of packet. */
3671         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3672         clr_ptr = (uint32_t *)cmd_pkt + 2;
3673         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3674
3675         /* Set NPORT-ID (of vha) */
3676         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3677         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3678         cmd_pkt->port_id[1] = vha->d_id.b.area;
3679         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3680
3681         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3682         cmd_pkt->entry_status = (uint8_t) rsp->id;
3683         /* Build command packet. */
3684         req->current_outstanding_cmd = handle;
3685         req->outstanding_cmds[handle] = sp;
3686         sp->handle = handle;
3687         req->cnt -= req_cnt;
3688
3689         /* Send the command to the firmware */
3690         wmb();
3691         qla2x00_start_iocbs(vha, req);
3692 queuing_error:
3693         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3694         return rval;
3695 }