/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
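
/*
 * Worked example (not in the original source): for dsds = 12 the base
 * Command Type 2 IOCB holds 3 descriptors; the remaining 9 need
 * 9 / 7 = 1 full Continuation Type 0 IOCB plus one more for the
 * 9 % 7 = 2 left over, i.e. 3 request entries in total.
 */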

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
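
/*
 * Worked example (not in the original source): 64-bit DSDs are larger,
 * so for dsds = 12 the base Command Type 3 IOCB holds 2 descriptors and
 * the remaining 10 fit exactly in 10 / 5 = 2 Continuation Type 1 IOCBs,
 * again 3 request entries in total.
 */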

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF Bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
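
/*
 * Summary of the translation above (derived from the switch, not part
 * of the original source):
 *   READ_STRIP / WRITE_STRIP   -> PO_MODE_DIF_REMOVE
 *   READ_INSERT / WRITE_INSERT -> PO_MODE_DIF_INSERT
 *   READ_PASS / WRITE_PASS     -> PO_MODE_DIF_TCP_CKSUM when the host
 *                                 guard includes SHOST_DIX_GUARD_IP,
 *                                 PO_MODE_DIF_PASS otherwise
 *   anything else              -> PO_MODE_DIF_PASS
 */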

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
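
/*
 * Layout note (not in the original source): each 64-bit DSD written
 * above is three little-endian dwords -- LSD(address), MSD(address),
 * length -- i.e. 12 bytes per descriptor, the same per-entry size the
 * DSD-list sizing below assumes in its "(avail_dsds + 1) * 12" math.
 */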

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number*/
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}
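
/*
 * Chaining note (not in the original source): the Command Type 6 IOCB
 * carries only a pointer to the first DSD list.  Each list reserves one
 * extra slot -- the "+ 1" in dsd_list_len -- which holds either the
 * LSD/MSD/length triple pointing at the next list or the three zero
 * dwords that terminate the walk.
 */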

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
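
/*
 * Example (not in the original source): this is a plain ceiling
 * division, so dsds = 2 * QLA_DSDS_PER_IOCB + 1 needs three lists --
 * two full ones plus one for the single remaining descriptor.
 */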

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask*/
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}
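
/*
 * Recap (derived from the switch above, not part of the original
 * source): Types 0, 1 and 2 seed the REF tag with the low 32 bits of
 * the command LBA and, when HBA error checking is enabled, unmask all
 * four REF tag bytes; Types 1 and 2 also zero the APP tag and its
 * mask; Type 3 checks the GUARD tag only, so every REF tag mask byte
 * stays 0x00.
 */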

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
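
/*
 * Usage sketch (mirrors the caller below, not part of the original
 * source): initialize a struct qla2_sgx with the total byte count and
 * the head of the scatterlist, then pull protection-interval-sized
 * chunks until the list is exhausted:
 *
 *      while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
 *              ... emit a DSD for sgx.dma_addr / sgx.dma_len;
 *                  partial == 0 marks a completed interval ...
 *      }
 */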

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                                      Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
            total_bytes = data_bytes;
            data_bytes += dif_bytes;
            break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
            total_bytes = data_bytes + dif_bytes;
            break;
        default:
            BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }
        /* Walks data segments */

        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                        cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
                        (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
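
/*
 * Worked example (not in the original source): with 512-byte sectors
 * and a 4096-byte transfer, dif_bytes = (4096 / 512) * 8 = 64.  For
 * READ_STRIP, WRITE_INSERT, READ_PASS and WRITE_PASS the fibre channel
 * byte count becomes total_bytes = 4096 + 64 = 4160, while READ_INSERT
 * and WRITE_STRIP keep total_bytes = 4096 and grow the host-side
 * data_bytes instead.
 */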
1399
1400 /**
1401  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1402  * @sp: command to send to the ISP
1403  *
1404  * Returns non-zero if a failure occurred, else zero.
1405  */
1406 int
1407 qla24xx_start_scsi(srb_t *sp)
1408 {
1409         int             nseg;
1410         unsigned long   flags;
1411         uint32_t        *clr_ptr;
1412         uint32_t        index;
1413         uint32_t        handle;
1414         struct cmd_type_7 *cmd_pkt;
1415         uint16_t        cnt;
1416         uint16_t        req_cnt;
1417         uint16_t        tot_dsds;
1418         struct req_que *req = NULL;
1419         struct rsp_que *rsp = NULL;
1420         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1421         struct scsi_qla_host *vha = sp->vha;
1422         struct qla_hw_data *ha = vha->hw;
1423
1424         /* Setup device pointers. */
1425         req = vha->req;
1426         rsp = req->rsp;
1427
1428         /* So we know we haven't pci_map'ed anything yet */
1429         tot_dsds = 0;
1430
1431         /* Send marker if required */
1432         if (vha->marker_needed != 0) {
1433                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1434                     QLA_SUCCESS)
1435                         return QLA_FUNCTION_FAILED;
1436                 vha->marker_needed = 0;
1437         }
1438
1439         /* Acquire ring specific lock */
1440         spin_lock_irqsave(&ha->hardware_lock, flags);
1441
1442         /* Check for room in outstanding command list. */
1443         handle = req->current_outstanding_cmd;
1444         for (index = 1; index < req->num_outstanding_cmds; index++) {
1445                 handle++;
1446                 if (handle == req->num_outstanding_cmds)
1447                         handle = 1;
1448                 if (!req->outstanding_cmds[handle])
1449                         break;
1450         }
1451         if (index == req->num_outstanding_cmds)
1452                 goto queuing_error;
1453
1454         /* Map the sg table so we have an accurate count of sg entries needed */
1455         if (scsi_sg_count(cmd)) {
1456                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1457                     scsi_sg_count(cmd), cmd->sc_data_direction);
1458                 if (unlikely(!nseg))
1459                         goto queuing_error;
1460         } else
1461                 nseg = 0;
1462
1463         tot_dsds = nseg;
1464         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1465         if (req->cnt < (req_cnt + 2)) {
1466                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1467                     RD_REG_DWORD_RELAXED(req->req_q_out);
1468                 if (req->ring_index < cnt)
1469                         req->cnt = cnt - req->ring_index;
1470                 else
1471                         req->cnt = req->length -
1472                                 (req->ring_index - cnt);
1473                 if (req->cnt < (req_cnt + 2))
1474                         goto queuing_error;
1475         }
1476
1477         /* Build command packet. */
1478         req->current_outstanding_cmd = handle;
1479         req->outstanding_cmds[handle] = sp;
1480         sp->handle = handle;
1481         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1482         req->cnt -= req_cnt;
1483
1484         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1485         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1486
1487         /* Zero out remaining portion of packet. */
1488         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
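             /* Skip the first 8 bytes (entry header area and the handle
              * written above); only the payload portion is cleared. */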
1489         clr_ptr = (uint32_t *)cmd_pkt + 2;
1490         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1491         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1492
1493         /* Set NPORT-ID and LUN number */
1494         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1495         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1496         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1497         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1498         cmd_pkt->vp_index = sp->vha->vp_idx;
1499
1500         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1501         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1502
1503         cmd_pkt->task = TSK_SIMPLE;
1504
1505         /* Load SCSI command packet. */
1506         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1507         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1508
1509         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1510
1511         /* Build IOCB segments */
1512         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1513
1514         /* Set total data segment count. */
1515         cmd_pkt->entry_count = (uint8_t)req_cnt;
1516         wmb();
1517         /* Adjust ring index. */
1518         req->ring_index++;
1519         if (req->ring_index == req->length) {
1520                 req->ring_index = 0;
1521                 req->ring_ptr = req->ring;
1522         } else
1523                 req->ring_ptr++;
1524
1525         sp->flags |= SRB_DMA_VALID;
1526
1527         /* Set chip new ring index. */
1528         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1529
1530         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1531         return QLA_SUCCESS;
1532
1533 queuing_error:
1534         if (tot_dsds)
1535                 scsi_dma_unmap(cmd);
1536
1537         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1538
1539         return QLA_FUNCTION_FAILED;
1540 }
1541
1542 /**
1543  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1544  * @sp: command to send to the ISP
1545  *
1546  * Returns non-zero if a failure occurred, else zero.
1547  */
1548 int
1549 qla24xx_dif_start_scsi(srb_t *sp)
1550 {
1551         int                     nseg;
1552         unsigned long           flags;
1553         uint32_t                *clr_ptr;
1554         uint32_t                index;
1555         uint32_t                handle;
1556         uint16_t                cnt;
1557         uint16_t                req_cnt = 0;
1558         uint16_t                tot_dsds;
1559         uint16_t                tot_prot_dsds;
1560         uint16_t                fw_prot_opts = 0;
1561         struct req_que          *req = NULL;
1562         struct rsp_que          *rsp = NULL;
1563         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1564         struct scsi_qla_host    *vha = sp->vha;
1565         struct qla_hw_data      *ha = vha->hw;
1566         struct cmd_type_crc_2   *cmd_pkt;
1567         uint32_t                status = 0;
1568
1569 #define QDSS_GOT_Q_SPACE        BIT_0
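     /* Set once ring space has been claimed, so the error path can return it. */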
1570
1571         /* Only process protection-enabled commands or CDBs longer than 16 bytes here */
1572         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1573                 if (cmd->cmd_len <= 16)
1574                         return qla24xx_start_scsi(sp);
1575         }
1576
1577         /* Setup device pointers. */
1578         req = vha->req;
1579         rsp = req->rsp;
1580
1581         /* So we know we haven't pci_map'ed anything yet */
1582         tot_dsds = 0;
1583
1584         /* Send marker if required */
1585         if (vha->marker_needed != 0) {
1586                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1587                     QLA_SUCCESS)
1588                         return QLA_FUNCTION_FAILED;
1589                 vha->marker_needed = 0;
1590         }
1591
1592         /* Acquire ring specific lock */
1593         spin_lock_irqsave(&ha->hardware_lock, flags);
1594
1595         /* Check for room in outstanding command list. */
1596         handle = req->current_outstanding_cmd;
1597         for (index = 1; index < req->num_outstanding_cmds; index++) {
1598                 handle++;
1599                 if (handle == req->num_outstanding_cmds)
1600                         handle = 1;
1601                 if (!req->outstanding_cmds[handle])
1602                         break;
1603         }
1604
1605         if (index == req->num_outstanding_cmds)
1606                 goto queuing_error;
1607
1608         /* Compute number of required data segments */
1609         /* Map the sg table so we have an accurate count of sg entries needed */
1610         if (scsi_sg_count(cmd)) {
1611                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1612                     scsi_sg_count(cmd), cmd->sc_data_direction);
1613                 if (unlikely(!nseg))
1614                         goto queuing_error;
1615                 else
1616                         sp->flags |= SRB_DMA_VALID;
1617
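                     /*
                      * For HBA-side DIF insert/strip the transfer must be
                      * carved into sector-sized blocks, so recount the data
                      * segments one block at a time instead of trusting the
                      * sg-entry count.
                      */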
1618                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1619                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1620                         struct qla2_sgx sgx;
1621                         uint32_t        partial;
1622
1623                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1624                         sgx.tot_bytes = scsi_bufflen(cmd);
1625                         sgx.cur_sg = scsi_sglist(cmd);
1626                         sgx.sp = sp;
1627
1628                         nseg = 0;
1629                         while (qla24xx_get_one_block_sg(
1630                             cmd->device->sector_size, &sgx, &partial))
1631                                 nseg++;
1632                 }
1633         } else
1634                 nseg = 0;
1635
1636         /* number of required data segments */
1637         tot_dsds = nseg;
1638
1639         /* Compute number of required protection segments */
1640         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1641                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1642                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1643                 if (unlikely(!nseg))
1644                         goto queuing_error;
1645                 else
1646                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1647
1648                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1649                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
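                             /*
                              * Insert/strip ops carry no protection sglist;
                              * the firmware produces or consumes one DIF
                              * tuple (8 bytes under T10-PI) per logical
                              * block, hence one segment per block of data.
                              */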
1650                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1651                 }
1652         } else {
1653                 nseg = 0;
1654         }
1655
1656         req_cnt = 1;
1657         /* Total Data and protection sg segment(s) */
1658         tot_prot_dsds = nseg;
1659         tot_dsds += nseg;
1660         if (req->cnt < (req_cnt + 2)) {
1661                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1662                     RD_REG_DWORD_RELAXED(req->req_q_out);
1663                 if (req->ring_index < cnt)
1664                         req->cnt = cnt - req->ring_index;
1665                 else
1666                         req->cnt = req->length -
1667                                 (req->ring_index - cnt);
1668                 if (req->cnt < (req_cnt + 2))
1669                         goto queuing_error;
1670         }
1671
1672         status |= QDSS_GOT_Q_SPACE;
1673
1674         /* Build header part of command packet (excluding the OPCODE). */
1675         req->current_outstanding_cmd = handle;
1676         req->outstanding_cmds[handle] = sp;
1677         sp->handle = handle;
1678         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1679         req->cnt -= req_cnt;
1680
1681         /* Fill-in common area */
1682         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1683         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1684
1685         clr_ptr = (uint32_t *)cmd_pkt + 2;
1686         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1687
1688         /* Set NPORT-ID and LUN number */
1689         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1690         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1691         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1692         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1693
1694         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1695         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1696
1697         /* Total Data and protection segment(s) */
1698         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1699
1700         /* Build IOCB segments and adjust for data protection segments */
1701         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1702             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1703                 QLA_SUCCESS)
1704                 goto queuing_error;
1705
1706         cmd_pkt->entry_count = (uint8_t)req_cnt;
1707         /* Specify response queue number where completion should happen */
1708         cmd_pkt->entry_status = (uint8_t) rsp->id;
1709         cmd_pkt->timeout = cpu_to_le16(0);
1710         wmb();
1711
1712         /* Adjust ring index. */
1713         req->ring_index++;
1714         if (req->ring_index == req->length) {
1715                 req->ring_index = 0;
1716                 req->ring_ptr = req->ring;
1717         } else
1718                 req->ring_ptr++;
1719
1720         /* Set chip new ring index. */
1721         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1722
1723         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1724
1725         return QLA_SUCCESS;
1726
1727 queuing_error:
1728         if (status & QDSS_GOT_Q_SPACE) {
1729                 req->outstanding_cmds[handle] = NULL;
1730                 req->cnt += req_cnt;
1731         }
1732         /* Cleanup will be performed by the caller (queuecommand) */
1733
1734         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1735         return QLA_FUNCTION_FAILED;
1736 }
1737
1738 /**
1739  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1740  * @sp: command to send to the ISP
1741  *
1742  * Returns non-zero if a failure occurred, else zero.
1743  */
1744 static int
1745 qla2xxx_start_scsi_mq(srb_t *sp)
1746 {
1747         int             nseg;
1748         unsigned long   flags;
1749         uint32_t        *clr_ptr;
1750         uint32_t        index;
1751         uint32_t        handle;
1752         struct cmd_type_7 *cmd_pkt;
1753         uint16_t        cnt;
1754         uint16_t        req_cnt;
1755         uint16_t        tot_dsds;
1756         struct req_que *req = NULL;
1757         struct rsp_que *rsp = NULL;
1758         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1759         struct scsi_qla_host *vha = sp->fcport->vha;
1760         struct qla_hw_data *ha = vha->hw;
1761         struct qla_qpair *qpair = sp->qpair;
1762
1763         /* Acquire qpair specific lock */
1764         spin_lock_irqsave(&qpair->qp_lock, flags);
1765
1766         /* Setup qpair pointers */
1767         rsp = qpair->rsp;
1768         req = qpair->req;
1769
1770         /* So we know we haven't pci_map'ed anything yet */
1771         tot_dsds = 0;
1772
1773         /* Send marker if required */
1774         if (vha->marker_needed != 0) {
1775                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1776                     QLA_SUCCESS) {
1777                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1778                         return QLA_FUNCTION_FAILED;
1779                 }
1780                 vha->marker_needed = 0;
1781         }
1782
1783         /* Check for room in outstanding command list. */
1784         handle = req->current_outstanding_cmd;
1785         for (index = 1; index < req->num_outstanding_cmds; index++) {
1786                 handle++;
1787                 if (handle == req->num_outstanding_cmds)
1788                         handle = 1;
1789                 if (!req->outstanding_cmds[handle])
1790                         break;
1791         }
1792         if (index == req->num_outstanding_cmds)
1793                 goto queuing_error;
1794
1795         /* Map the sg table so we have an accurate count of sg entries needed */
1796         if (scsi_sg_count(cmd)) {
1797                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1798                     scsi_sg_count(cmd), cmd->sc_data_direction);
1799                 if (unlikely(!nseg))
1800                         goto queuing_error;
1801         } else
1802                 nseg = 0;
1803
1804         tot_dsds = nseg;
1805         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1806         if (req->cnt < (req_cnt + 2)) {
1807                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1808                     RD_REG_DWORD_RELAXED(req->req_q_out);
1809                 if (req->ring_index < cnt)
1810                         req->cnt = cnt - req->ring_index;
1811                 else
1812                         req->cnt = req->length -
1813                                 (req->ring_index - cnt);
1814                 if (req->cnt < (req_cnt + 2))
1815                         goto queuing_error;
1816         }
1817
1818         /* Build command packet. */
1819         req->current_outstanding_cmd = handle;
1820         req->outstanding_cmds[handle] = sp;
1821         sp->handle = handle;
1822         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1823         req->cnt -= req_cnt;
1824
1825         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1826         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1827
1828         /* Zero out remaining portion of packet. */
1829         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1830         clr_ptr = (uint32_t *)cmd_pkt + 2;
1831         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1832         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1833
1834         /* Set NPORT-ID and LUN number */
1835         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1836         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1837         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1838         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1839         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1840
1841         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1842         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1843
1844         cmd_pkt->task = TSK_SIMPLE;
1845
1846         /* Load SCSI command packet. */
1847         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1848         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1849
1850         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1851
1852         /* Build IOCB segments */
1853         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1854
1855         /* Set total data segment count. */
1856         cmd_pkt->entry_count = (uint8_t)req_cnt;
1857         wmb();
1858         /* Adjust ring index. */
1859         req->ring_index++;
1860         if (req->ring_index == req->length) {
1861                 req->ring_index = 0;
1862                 req->ring_ptr = req->ring;
1863         } else
1864                 req->ring_ptr++;
1865
1866         sp->flags |= SRB_DMA_VALID;
1867
1868         /* Set chip new ring index. */
1869         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1870
1871         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1872         return QLA_SUCCESS;
1873
1874 queuing_error:
1875         if (tot_dsds)
1876                 scsi_dma_unmap(cmd);
1877
1878         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1879
1880         return QLA_FUNCTION_FAILED;
1881 }
1882
1884 /**
1885  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1886  * @sp: command to send to the ISP
1887  *
1888  * Returns non-zero if a failure occurred, else zero.
1889  */
1890 int
1891 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1892 {
1893         int                     nseg;
1894         unsigned long           flags;
1895         uint32_t                *clr_ptr;
1896         uint32_t                index;
1897         uint32_t                handle;
1898         uint16_t                cnt;
1899         uint16_t                req_cnt = 0;
1900         uint16_t                tot_dsds;
1901         uint16_t                tot_prot_dsds;
1902         uint16_t                fw_prot_opts = 0;
1903         struct req_que          *req = NULL;
1904         struct rsp_que          *rsp = NULL;
1905         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1906         struct scsi_qla_host    *vha = sp->fcport->vha;
1907         struct qla_hw_data      *ha = vha->hw;
1908         struct cmd_type_crc_2   *cmd_pkt;
1909         uint32_t                status = 0;
1910         struct qla_qpair        *qpair = sp->qpair;
1911
1912 #define QDSS_GOT_Q_SPACE        BIT_0
1913
1914         /* Check for host side state */
1915         if (!qpair->online) {
1916                 cmd->result = DID_NO_CONNECT << 16;
1917                 return QLA_INTERFACE_ERROR;
1918         }
1919
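             /* Fail fast if this qpair cannot do T10-PI for this command. */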
1920         if (!qpair->difdix_supported &&
1921                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1922                 cmd->result = DID_NO_CONNECT << 16;
1923                 return QLA_INTERFACE_ERROR;
1924         }
1925
1926         /* Only process protection-enabled commands or CDBs longer than 16 bytes here */
1927         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1928                 if (cmd->cmd_len <= 16)
1929                         return qla2xxx_start_scsi_mq(sp);
1930         }
1931
1932         spin_lock_irqsave(&qpair->qp_lock, flags);
1933
1934         /* Setup qpair pointers */
1935         rsp = qpair->rsp;
1936         req = qpair->req;
1937
1938         /* So we know we haven't pci_map'ed anything yet */
1939         tot_dsds = 0;
1940
1941         /* Send marker if required */
1942         if (vha->marker_needed != 0) {
1943                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1944                     QLA_SUCCESS) {
1945                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1946                         return QLA_FUNCTION_FAILED;
1947                 }
1948                 vha->marker_needed = 0;
1949         }
1950
1951         /* Check for room in outstanding command list. */
1952         handle = req->current_outstanding_cmd;
1953         for (index = 1; index < req->num_outstanding_cmds; index++) {
1954                 handle++;
1955                 if (handle == req->num_outstanding_cmds)
1956                         handle = 1;
1957                 if (!req->outstanding_cmds[handle])
1958                         break;
1959         }
1960
1961         if (index == req->num_outstanding_cmds)
1962                 goto queuing_error;
1963
1964         /* Compute number of required data segments */
1965         /* Map the sg table so we have an accurate count of sg entries needed */
1966         if (scsi_sg_count(cmd)) {
1967                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1968                     scsi_sg_count(cmd), cmd->sc_data_direction);
1969                 if (unlikely(!nseg))
1970                         goto queuing_error;
1971                 else
1972                         sp->flags |= SRB_DMA_VALID;
1973
1974                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1975                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1976                         struct qla2_sgx sgx;
1977                         uint32_t        partial;
1978
1979                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1980                         sgx.tot_bytes = scsi_bufflen(cmd);
1981                         sgx.cur_sg = scsi_sglist(cmd);
1982                         sgx.sp = sp;
1983
1984                         nseg = 0;
1985                         while (qla24xx_get_one_block_sg(
1986                             cmd->device->sector_size, &sgx, &partial))
1987                                 nseg++;
1988                 }
1989         } else
1990                 nseg = 0;
1991
1992         /* number of required data segments */
1993         tot_dsds = nseg;
1994
1995         /* Compute number of required protection segments */
1996         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1997                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1998                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1999                 if (unlikely(!nseg))
2000                         goto queuing_error;
2001                 else
2002                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2003
2004                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2005                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2006                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2007                 }
2008         } else {
2009                 nseg = 0;
2010         }
2011
2012         req_cnt = 1;
2013         /* Total Data and protection sg segment(s) */
2014         tot_prot_dsds = nseg;
2015         tot_dsds += nseg;
2016         if (req->cnt < (req_cnt + 2)) {
2017                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2018                     RD_REG_DWORD_RELAXED(req->req_q_out);
2019                 if (req->ring_index < cnt)
2020                         req->cnt = cnt - req->ring_index;
2021                 else
2022                         req->cnt = req->length -
2023                                 (req->ring_index - cnt);
2024                 if (req->cnt < (req_cnt + 2))
2025                         goto queuing_error;
2026         }
2027
2028         status |= QDSS_GOT_Q_SPACE;
2029
2030         /* Build header part of command packet (excluding the OPCODE). */
2031         req->current_outstanding_cmd = handle;
2032         req->outstanding_cmds[handle] = sp;
2033         sp->handle = handle;
2034         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2035         req->cnt -= req_cnt;
2036
2037         /* Fill-in common area */
2038         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2039         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2040
2041         clr_ptr = (uint32_t *)cmd_pkt + 2;
2042         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2043
2044         /* Set NPORT-ID and LUN number */
2045         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2046         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2047         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2048         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2049
2050         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2051         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2052
2053         /* Total Data and protection segment(s) */
2054         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2055
2056         /* Build IOCB segments and adjust for data protection segments */
2057         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2058             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2059                 QLA_SUCCESS)
2060                 goto queuing_error;
2061
2062         cmd_pkt->entry_count = (uint8_t)req_cnt;
2063         cmd_pkt->timeout = cpu_to_le16(0);
2064         wmb();
2065
2066         /* Adjust ring index. */
2067         req->ring_index++;
2068         if (req->ring_index == req->length) {
2069                 req->ring_index = 0;
2070                 req->ring_ptr = req->ring;
2071         } else
2072                 req->ring_ptr++;
2073
2074         /* Set chip new ring index. */
2075         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2076
2077         /* Manage unprocessed RIO/ZIO commands in response queue. */
2078         if (vha->flags.process_response_queue &&
2079             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2080                 qla24xx_process_response_queue(vha, rsp);
2081
2082         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2083
2084         return QLA_SUCCESS;
2085
2086 queuing_error:
2087         if (status & QDSS_GOT_Q_SPACE) {
2088                 req->outstanding_cmds[handle] = NULL;
2089                 req->cnt += req_cnt;
2090         }
2091         /* Cleanup will be performed by the caller (queuecommand) */
2092
2093         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2094         return QLA_FUNCTION_FAILED;
2095 }
2096
2097 /* Generic Control-SRB manipulation functions. */
2098
2099 /* Caller must hold the queue-pair lock (the hardware_lock for the base qpair). */
2100
2101 void *
2102 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2103 {
2104         scsi_qla_host_t *vha = qpair->vha;
2105         struct qla_hw_data *ha = vha->hw;
2106         struct req_que *req = qpair->req;
2107         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2108         uint32_t index, handle;
2109         request_t *pkt;
2110         uint16_t cnt, req_cnt;
2111
2112         pkt = NULL;
2113         req_cnt = 1;
2114         handle = 0;
2115
2116         if (sp && (sp->type != SRB_SCSI_CMD)) {
2117                 /* Adjust entry-counts as needed. */
2118                 req_cnt = sp->iocbs;
2119         }
2120
2121         /* Check for room on request queue. */
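             /*
              * The consumer index lives in a different spot per ISP family:
              * a shadow copy in host memory when enabled, otherwise the
              * family-specific req_q_out register read below.
              */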
2122         if (req->cnt < req_cnt + 2) {
2123                 if (qpair->use_shadow_reg)
2124                         cnt = *req->out_ptr;
2125                 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2126                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2127                 else if (IS_P3P_TYPE(ha))
2128                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2129                 else if (IS_FWI2_CAPABLE(ha))
2130                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2131                 else if (IS_QLAFX00(ha))
2132                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2133                 else
2134                         cnt = qla2x00_debounce_register(
2135                             ISP_REQ_Q_OUT(ha, &reg->isp));
2136
2137                 if (req->ring_index < cnt)
2138                         req->cnt = cnt - req->ring_index;
2139                 else
2140                         req->cnt = req->length -
2141                             (req->ring_index - cnt);
2142         }
2143         if (req->cnt < req_cnt + 2)
2144                 goto queuing_error;
2145
2146         if (sp) {
2147                 /* Check for room in outstanding command list. */
2148                 handle = req->current_outstanding_cmd;
2149                 for (index = 1; index < req->num_outstanding_cmds; index++) {
2150                         handle++;
2151                         if (handle == req->num_outstanding_cmds)
2152                                 handle = 1;
2153                         if (!req->outstanding_cmds[handle])
2154                                 break;
2155                 }
2156                 if (index == req->num_outstanding_cmds) {
2157                         ql_log(ql_log_warn, vha, 0x700b,
2158                             "No room on outstanding cmd array.\n");
2159                         goto queuing_error;
2160                 }
2161
2162                 /* Prep command array. */
2163                 req->current_outstanding_cmd = handle;
2164                 req->outstanding_cmds[handle] = sp;
2165                 sp->handle = handle;
2166         }
2167
2168         /* Prep packet */
2169         req->cnt -= req_cnt;
2170         pkt = req->ring_ptr;
2171         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2172         if (IS_QLAFX00(ha)) {
2173                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2174                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2175         } else {
2176                 pkt->entry_count = req_cnt;
2177                 pkt->handle = handle;
2178         }
2179
2180         return pkt;
2181
2182 queuing_error:
2183         qpair->tgt_counters.num_alloc_iocb_failed++;
2184         return pkt;
2185 }
2186
2187 void *
2188 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2189 {
2190         scsi_qla_host_t *vha = qpair->vha;
2191
2192         if (qla2x00_reset_active(vha))
2193                 return NULL;
2194
2195         return __qla2x00_alloc_iocbs(qpair, sp);
2196 }
2197
2198 void *
2199 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2200 {
2201         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2202 }
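     
     /*
      * Typical caller pattern (a sketch, not a verbatim excerpt from this
      * file): allocate a packet under the appropriate lock, fill in the
      * IOCB fields, then hand the updated ring to the chip:
      *
      *      pkt = qla2x00_alloc_iocbs(vha, sp);
      *      if (!pkt)
      *              goto queuing_error;
      *      ... fill in IOCB fields ...
      *      wmb();
      *      qla2x00_start_iocbs(vha, vha->req);
      */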
2203
2204 static void
2205 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2206 {
2207         struct srb_iocb *lio = &sp->u.iocb_cmd;
2208
2209         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2210         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2211         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2212                 logio->control_flags |= LCF_NVME_PRLI;
2213
2214         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2215         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2216         logio->port_id[1] = sp->fcport->d_id.b.area;
2217         logio->port_id[2] = sp->fcport->d_id.b.domain;
2218         logio->vp_index = sp->vha->vp_idx;
2219 }
2220
2221 static void
2222 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2223 {
2224         struct srb_iocb *lio = &sp->u.iocb_cmd;
2225
2226         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2227         if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2228                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2229         } else {
2230                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2231                 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2232                         logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2233                 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2234                         logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2235         }
2236         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2237         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2238         logio->port_id[1] = sp->fcport->d_id.b.area;
2239         logio->port_id[2] = sp->fcport->d_id.b.domain;
2240         logio->vp_index = sp->vha->vp_idx;
2241 }
2242
2243 static void
2244 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2245 {
2246         struct qla_hw_data *ha = sp->vha->hw;
2247         struct srb_iocb *lio = &sp->u.iocb_cmd;
2248         uint16_t opts;
2249
2250         mbx->entry_type = MBX_IOCB_TYPE;
2251         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2252         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2253         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2254         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
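             /*
              * ISPs without extended IDs pack the 8-bit loop ID and the
              * option bits together into mb1.
              */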
2255         if (HAS_EXTENDED_IDS(ha)) {
2256                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2257                 mbx->mb10 = cpu_to_le16(opts);
2258         } else {
2259                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2260         }
2261         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2262         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2263             sp->fcport->d_id.b.al_pa);
2264         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2265 }
2266
2267 static void
2268 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2269 {
2270         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2271         logio->control_flags =
2272             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2273         if (!sp->fcport->keep_nport_handle)
2274                 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2275         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2276         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2277         logio->port_id[1] = sp->fcport->d_id.b.area;
2278         logio->port_id[2] = sp->fcport->d_id.b.domain;
2279         logio->vp_index = sp->vha->vp_idx;
2280 }
2281
2282 static void
2283 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2284 {
2285         struct qla_hw_data *ha = sp->vha->hw;
2286
2287         mbx->entry_type = MBX_IOCB_TYPE;
2288         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2289         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2290         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2291             cpu_to_le16(sp->fcport->loop_id):
2292             cpu_to_le16(sp->fcport->loop_id << 8);
2293         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2294         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2295             sp->fcport->d_id.b.al_pa);
2296         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2297         /* Implicit: mbx->mb10 = 0. */
2298 }
2299
2300 static void
2301 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2302 {
2303         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2304         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2305         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2306         logio->vp_index = sp->vha->vp_idx;
2307 }
2308
2309 static void
2310 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2311 {
2312         struct qla_hw_data *ha = sp->vha->hw;
2313
2314         mbx->entry_type = MBX_IOCB_TYPE;
2315         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2316         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2317         if (HAS_EXTENDED_IDS(ha)) {
2318                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2319                 mbx->mb10 = cpu_to_le16(BIT_0);
2320         } else {
2321                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2322         }
2323         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2324         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2325         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2326         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2327         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2328 }
2329
2330 static void
2331 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2332 {
2333         uint32_t flags;
2334         uint64_t lun;
2335         struct fc_port *fcport = sp->fcport;
2336         scsi_qla_host_t *vha = fcport->vha;
2337         struct qla_hw_data *ha = vha->hw;
2338         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2339         struct req_que *req = vha->req;
2340
2341         flags = iocb->u.tmf.flags;
2342         lun = iocb->u.tmf.lun;
2343
2344         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2345         tsk->entry_count = 1;
2346         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2347         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2348         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2349         tsk->control_flags = cpu_to_le32(flags);
2350         tsk->port_id[0] = fcport->d_id.b.al_pa;
2351         tsk->port_id[1] = fcport->d_id.b.area;
2352         tsk->port_id[2] = fcport->d_id.b.domain;
2353         tsk->vp_index = fcport->vha->vp_idx;
2354
2355         if (flags == TCF_LUN_RESET) {
2356                 int_to_scsilun(lun, &tsk->lun);
2357                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2358                         sizeof(tsk->lun));
2359         }
2360 }
2361
2362 static void
2363 qla2x00_els_dcmd_sp_free(void *data)
2364 {
2365         srb_t *sp = data;
2366         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2367
2368         kfree(sp->fcport);
2369
2370         if (elsio->u.els_logo.els_logo_pyld)
2371                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2372                     elsio->u.els_logo.els_logo_pyld,
2373                     elsio->u.els_logo.els_logo_pyld_dma);
2374
2375         del_timer(&elsio->timer);
2376         qla2x00_rel_sp(sp);
2377 }
2378
2379 static void
2380 qla2x00_els_dcmd_iocb_timeout(void *data)
2381 {
2382         srb_t *sp = data;
2383         fc_port_t *fcport = sp->fcport;
2384         struct scsi_qla_host *vha = sp->vha;
2385         struct srb_iocb *lio = &sp->u.iocb_cmd;
2386
2387         ql_dbg(ql_dbg_io, vha, 0x3069,
2388             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2389             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2390             fcport->d_id.b.al_pa);
2391
2392         complete(&lio->u.els_logo.comp);
2393 }
2394
2395 static void
2396 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2397 {
2398         srb_t *sp = ptr;
2399         fc_port_t *fcport = sp->fcport;
2400         struct srb_iocb *lio = &sp->u.iocb_cmd;
2401         struct scsi_qla_host *vha = sp->vha;
2402
2403         ql_dbg(ql_dbg_io, vha, 0x3072,
2404             "%s hdl=%x, portid=%02x%02x%02x done\n",
2405             sp->name, sp->handle, fcport->d_id.b.domain,
2406             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2407
2408         complete(&lio->u.els_logo.comp);
2409 }
2410
2411 int
2412 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2413     port_id_t remote_did)
2414 {
2415         srb_t *sp;
2416         fc_port_t *fcport = NULL;
2417         struct srb_iocb *elsio = NULL;
2418         struct qla_hw_data *ha = vha->hw;
2419         struct els_logo_payload logo_pyld;
2420         int rval = QLA_SUCCESS;
2421
2422         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2423         if (!fcport) {
2424                 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2425                 return -ENOMEM;
2426         }
2427
2428         /* Alloc SRB structure */
2429         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2430         if (!sp) {
2431                 kfree(fcport);
2432                 ql_log(ql_log_info, vha, 0x70e6,
2433                     "SRB allocation failed\n");
2434                 return -ENOMEM;
2435         }
2436
2437         elsio = &sp->u.iocb_cmd;
2438         fcport->loop_id = 0xFFFF;
2439         fcport->d_id.b.domain = remote_did.b.domain;
2440         fcport->d_id.b.area = remote_did.b.area;
2441         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2442
2443         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2444             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2445
2446         sp->type = SRB_ELS_DCMD;
2447         sp->name = "ELS_DCMD";
2448         sp->fcport = fcport;
2449         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2450         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2451         init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2452         sp->done = qla2x00_els_dcmd_sp_done;
2453         sp->free = qla2x00_els_dcmd_sp_free;
2454
2455         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2456                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2457                             GFP_KERNEL);
2458
2459         if (!elsio->u.els_logo.els_logo_pyld) {
2460                 sp->free(sp);
2461                 return QLA_FUNCTION_FAILED;
2462         }
2463
2464         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2465
2466         elsio->u.els_logo.els_cmd = els_opcode;
2467         logo_pyld.opcode = els_opcode;
2468         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2469         logo_pyld.s_id[1] = vha->d_id.b.area;
2470         logo_pyld.s_id[2] = vha->d_id.b.domain;
2471         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2472         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2473
2474         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2475             sizeof(struct els_logo_payload));
2476
2477         rval = qla2x00_start_sp(sp);
2478         if (rval != QLA_SUCCESS) {
2479                 sp->free(sp);
2480                 return QLA_FUNCTION_FAILED;
2481         }
2482
2483         ql_dbg(ql_dbg_io, vha, 0x3074,
2484             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2485             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2486             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2487
2488         wait_for_completion(&elsio->u.els_logo.comp);
2489
2490         sp->free(sp);
2491         return rval;
2492 }
2493
2494 static void
2495 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2496 {
2497         scsi_qla_host_t *vha = sp->vha;
2498         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2499
2500         els_iocb->entry_type = ELS_IOCB_TYPE;
2501         els_iocb->entry_count = 1;
2502         els_iocb->sys_define = 0;
2503         els_iocb->entry_status = 0;
2504         els_iocb->handle = sp->handle;
2505         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2506         els_iocb->tx_dsd_count = 1;
2507         els_iocb->vp_index = vha->vp_idx;
2508         els_iocb->sof_type = EST_SOFI3;
2509         els_iocb->rx_dsd_count = 0;
2510         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2511
2512         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2513         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2514         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2515         els_iocb->s_id[0] = vha->d_id.b.al_pa;
2516         els_iocb->s_id[1] = vha->d_id.b.area;
2517         els_iocb->s_id[2] = vha->d_id.b.domain;
2518         els_iocb->control_flags = 0;
2519
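             /*
              * PLOGI expects a response payload, so both TX and RX
              * descriptors are filled in below; LOGO is one-way and leaves
              * the RX side zeroed.
              */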
2520         if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2521                 els_iocb->tx_byte_count = els_iocb->tx_len =
2522                         sizeof(struct els_plogi_payload);
2523                 els_iocb->tx_address[0] =
2524                         cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2525                 els_iocb->tx_address[1] =
2526                         cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2527
2528                 els_iocb->rx_dsd_count = 1;
2529                 els_iocb->rx_byte_count = els_iocb->rx_len =
2530                         sizeof(struct els_plogi_payload);
2531                 els_iocb->rx_address[0] =
2532                         cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2533                 els_iocb->rx_address[1] =
2534                         cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2535
2536                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2537                     "PLOGI ELS IOCB:\n");
2538                 ql_dump_buffer(ql_log_info, vha, 0x0109,
2539                     (uint8_t *)els_iocb, 0x70);
2540         } else {
2541                 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2542                 els_iocb->tx_address[0] =
2543                     cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2544                 els_iocb->tx_address[1] =
2545                     cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2546                 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2547
2548                 els_iocb->rx_byte_count = 0;
2549                 els_iocb->rx_address[0] = 0;
2550                 els_iocb->rx_address[1] = 0;
2551                 els_iocb->rx_len = 0;
2552         }
2553
2554         sp->vha->qla_stats.control_requests++;
2555 }
2556
2557 static void
2558 qla2x00_els_dcmd2_iocb_timeout(void *data)
2559 {
2560         srb_t *sp = data;
2561         fc_port_t *fcport = sp->fcport;
2562         struct scsi_qla_host *vha = sp->vha;
2563         struct qla_hw_data *ha = vha->hw;
2564         unsigned long flags = 0;
2565         int res;
2566
2567         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2568             "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2569             sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2570
2571         /* Abort the exchange */
2572         spin_lock_irqsave(&ha->hardware_lock, flags);
2573         res = ha->isp_ops->abort_command(sp);
2574         ql_dbg(ql_dbg_io, vha, 0x3070,
2575             "mbx abort_command %s\n",
2576             (res == QLA_SUCCESS) ? "successful" : "failed");
2577         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2578
2579         sp->done(sp, QLA_FUNCTION_TIMEOUT);
2580 }
2581
2582 static void
2583 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2584 {
2585         srb_t *sp = ptr;
2586         fc_port_t *fcport = sp->fcport;
2587         struct srb_iocb *lio = &sp->u.iocb_cmd;
2588         struct scsi_qla_host *vha = sp->vha;
2589         struct event_arg ea;
2590         struct qla_work_evt *e;
2591
2592         ql_dbg(ql_dbg_disc, vha, 0x3072,
2593             "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2594             sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2595
2596         fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2597         del_timer(&sp->u.iocb_cmd.timer);
2598
2599         if (sp->flags & SRB_WAKEUP_ON_COMP)
2600                 complete(&lio->u.els_plogi.comp);
2601         else {
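                     /*
                      * No waiter: feed the result to the fcport state
                      * machine, then defer the DMA unmap/free to a work
                      * item, falling back to freeing inline if the work
                      * allocation fails.
                      */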
2602                 if (res) {
2603                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2604                 } else {
2605                         memset(&ea, 0, sizeof(ea));
2606                         ea.fcport = fcport;
2607                         ea.rc = res;
2608                         ea.event = FCME_ELS_PLOGI_DONE;
2609                         qla2x00_fcport_event_handler(vha, &ea);
2610                 }
2611
2612                 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2613                 if (!e) {
2614                         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2615
2616                         if (elsio->u.els_plogi.els_plogi_pyld)
2617                                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2618                                     elsio->u.els_plogi.tx_size,
2619                                     elsio->u.els_plogi.els_plogi_pyld,
2620                                     elsio->u.els_plogi.els_plogi_pyld_dma);
2621
2622                         if (elsio->u.els_plogi.els_resp_pyld)
2623                                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2624                                     elsio->u.els_plogi.rx_size,
2625                                     elsio->u.els_plogi.els_resp_pyld,
2626                                     elsio->u.els_plogi.els_resp_pyld_dma);
2627                         sp->free(sp);
2628                         return;
2629                 }
2630                 e->u.iosb.sp = sp;
2631                 qla2x00_post_work(vha, e);
2632         }
2633 }
2634
2635 int
2636 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2637     fc_port_t *fcport, bool wait)
2638 {
2639         srb_t *sp;
2640         struct srb_iocb *elsio = NULL;
2641         struct qla_hw_data *ha = vha->hw;
2642         int rval = QLA_SUCCESS;
2643         void    *ptr, *resp_ptr;
2644
2645         /* Alloc SRB structure */
2646         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2647         if (!sp) {
2648                 ql_log(ql_log_info, vha, 0x70e6,
2649                     "SRB allocation failed\n");
2650                 return -ENOMEM;
2651         }
2652
2653         elsio = &sp->u.iocb_cmd;
2654         ql_dbg(ql_dbg_io, vha, 0x3073,
2655             "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2656
2657         fcport->flags |= FCF_ASYNC_SENT;
2658         sp->type = SRB_ELS_DCMD;
2659         sp->name = "ELS_DCMD";
2660         sp->fcport = fcport;
2661
2662         elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2663         init_completion(&elsio->u.els_plogi.comp);
2664         if (wait)
2665                 sp->flags = SRB_WAKEUP_ON_COMP;
2666
2667         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2668
2669         sp->done = qla2x00_els_dcmd2_sp_done;
2670         elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2671
2672         ptr = elsio->u.els_plogi.els_plogi_pyld =
2673             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2674                 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2675
2676         if (!elsio->u.els_plogi.els_plogi_pyld) {
2677                 rval = QLA_FUNCTION_FAILED;
2678                 goto out;
2679         }
2680
2681         resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2682             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2683                 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2684
2685         if (!elsio->u.els_plogi.els_resp_pyld) {
2686                 rval = QLA_FUNCTION_FAILED;
2687                 goto out;
2688         }
2689
2690         ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2691
2692         memset(ptr, 0, sizeof(struct els_plogi_payload));
2693         memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2694         memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2695             &ha->plogi_els_payld.data,
2696             sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2697
2698         elsio->u.els_plogi.els_cmd = els_opcode;
2699         elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2700
2701         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2702         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2703             (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2704
2705         rval = qla2x00_start_sp(sp);
2706         if (rval != QLA_SUCCESS) {
2707                 rval = QLA_FUNCTION_FAILED;
2708         } else {
2709                 ql_dbg(ql_dbg_disc, vha, 0x3074,
2710                     "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2711                     sp->name, sp->handle, fcport->loop_id,
2712                     fcport->d_id.b24, vha->d_id.b24);
2713         }
2714
2715         if (wait) {
2716                 wait_for_completion(&elsio->u.els_plogi.comp);
2717
2718                 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2719                         rval = QLA_FUNCTION_FAILED;
2720         } else {
2721                 goto done;
2722         }
2723
2724 out:
2725         fcport->flags &= ~(FCF_ASYNC_SENT);
2726         if (elsio->u.els_plogi.els_plogi_pyld)
2727                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2728                     elsio->u.els_plogi.tx_size,
2729                     elsio->u.els_plogi.els_plogi_pyld,
2730                     elsio->u.els_plogi.els_plogi_pyld_dma);
2731
2732         if (elsio->u.els_plogi.els_resp_pyld)
2733                 dma_free_coherent(&sp->vha->hw->pdev->dev,
2734                     elsio->u.els_plogi.rx_size,
2735                     elsio->u.els_plogi.els_resp_pyld,
2736                     elsio->u.els_plogi.els_resp_pyld_dma);
2737
2738         sp->free(sp);
2739 done:
2740         return rval;
2741 }
2742
2743 static void
2744 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2745 {
2746         struct bsg_job *bsg_job = sp->u.bsg_job;
2747         struct fc_bsg_request *bsg_request = bsg_job->request;
2748
2749         els_iocb->entry_type = ELS_IOCB_TYPE;
2750         els_iocb->entry_count = 1;
2751         els_iocb->sys_define = 0;
2752         els_iocb->entry_status = 0;
2753         els_iocb->handle = sp->handle;
2754         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2755         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2756         els_iocb->vp_index = sp->vha->vp_idx;
2757         els_iocb->sof_type = EST_SOFI3;
2758         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2759
2760         els_iocb->opcode =
2761             sp->type == SRB_ELS_CMD_RPT ?
2762             bsg_request->rqst_data.r_els.els_code :
2763             bsg_request->rqst_data.h_els.command_code;
2764         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2765         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2766         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2767         els_iocb->control_flags = 0;
2768         els_iocb->rx_byte_count =
2769             cpu_to_le32(bsg_job->reply_payload.payload_len);
2770         els_iocb->tx_byte_count =
2771             cpu_to_le32(bsg_job->request_payload.payload_len);
2772
2773         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2774             (bsg_job->request_payload.sg_list)));
2775         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2776             (bsg_job->request_payload.sg_list)));
2777         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2778             (bsg_job->request_payload.sg_list));
2779
2780         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2781             (bsg_job->reply_payload.sg_list)));
2782         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2783             (bsg_job->reply_payload.sg_list)));
2784         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2785             (bsg_job->reply_payload.sg_list));
2786
2787         sp->vha->qla_stats.control_requests++;
2788 }
2789
2790 static void
2791 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2792 {
2793         uint16_t        avail_dsds;
2794         uint32_t        *cur_dsd;
2795         struct scatterlist *sg;
2796         int index;
2797         uint16_t tot_dsds;
2798         scsi_qla_host_t *vha = sp->vha;
2799         struct qla_hw_data *ha = vha->hw;
2800         struct bsg_job *bsg_job = sp->u.bsg_job;
2801         int loop_iteration = 0;
2802         int entry_count = 1;
2803
2804         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2805         ct_iocb->entry_type = CT_IOCB_TYPE;
2806         ct_iocb->entry_status = 0;
2807         ct_iocb->handle1 = sp->handle;
2808         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2809         ct_iocb->status = cpu_to_le16(0);
2810         ct_iocb->control_flags = cpu_to_le16(0);
2811         ct_iocb->timeout = 0;
2812         ct_iocb->cmd_dsd_count =
2813             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2814         ct_iocb->total_dsd_count =
2815             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2816         ct_iocb->req_bytecount =
2817             cpu_to_le32(bsg_job->request_payload.payload_len);
2818         ct_iocb->rsp_bytecount =
2819             cpu_to_le32(bsg_job->reply_payload.payload_len);
2820
2821         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2822             (bsg_job->request_payload.sg_list)));
2823         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2824             (bsg_job->request_payload.sg_list)));
2825         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2826
2827         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2828             (bsg_job->reply_payload.sg_list)));
2829         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2830             (bsg_job->reply_payload.sg_list)));
2831         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2832
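             /*
              * The response descriptor embedded in the IOCB doubles as the
              * first slot of the walk below; additional descriptors spill
              * into Continuation Type 1 IOCBs, five at a time.
              */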
2833         avail_dsds = 1;
2834         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2835         index = 0;
2836         tot_dsds = bsg_job->reply_payload.sg_cnt;
2837
2838         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2839                 dma_addr_t       sle_dma;
2840                 cont_a64_entry_t *cont_pkt;
2841
2842                 /* Allocate additional continuation packets? */
2843                 if (avail_dsds == 0) {
2844                         /*
2845                          * Five DSDs are available in the Cont.
2846                          * Type 1 IOCB.
2847                          */
2848                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2849                             vha->hw->req_q_map[0]);
2850                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2851                         avail_dsds = 5;
2852                         entry_count++;
2853                 }
2854
2855                 sle_dma = sg_dma_address(sg);
2856                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2857                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2858                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2859                 loop_iteration++;
2860                 avail_dsds--;
2861         }
2862         ct_iocb->entry_count = entry_count;
2863
2864         sp->vha->qla_stats.control_requests++;
2865 }
2866
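     /**
      * qla24xx_ct_iocb() - Build a CT pass-through IOCB for FWI2-capable ISPs.
      * @sp: SRB carrying the bsg_job to issue
      * @ct_iocb: CT IOCB packet to populate
      */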
2867 static void
2868 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2869 {
2870         uint16_t        avail_dsds;
2871         uint32_t        *cur_dsd;
2872         struct scatterlist *sg;
2873         int index;
2874         uint16_t cmd_dsds, rsp_dsds;
2875         scsi_qla_host_t *vha = sp->vha;
2876         struct qla_hw_data *ha = vha->hw;
2877         struct bsg_job *bsg_job = sp->u.bsg_job;
2878         int entry_count = 1;
2879         cont_a64_entry_t *cont_pkt = NULL;
2880
2881         ct_iocb->entry_type = CT_IOCB_TYPE;
2882         ct_iocb->entry_status = 0;
2883         ct_iocb->sys_define = 0;
2884         ct_iocb->handle = sp->handle;
2885
2886         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2887         ct_iocb->vp_index = sp->vha->vp_idx;
2888         ct_iocb->comp_status = cpu_to_le16(0);
2889
2890         cmd_dsds = bsg_job->request_payload.sg_cnt;
2891         rsp_dsds = bsg_job->reply_payload.sg_cnt;
2892
2893         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2894         ct_iocb->timeout = 0;
2895         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2896         ct_iocb->cmd_byte_count =
2897             cpu_to_le32(bsg_job->request_payload.payload_len);
2898
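             /*
              * The CT IOCB embeds two data segment descriptors; the command
              * and response scatterlists below consume them in order, and
              * anything beyond that spills into Continuation Type 1 IOCBs,
              * five DSDs at a time.
              */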
2899         avail_dsds = 2;
2900         cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
2901         index = 0;
2902
2903         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2904                 dma_addr_t       sle_dma;
2905
2906                 /* Allocate additional continuation packets? */
2907                 if (avail_dsds == 0) {
2908                         /*
2909                          * Five DSDs are available in the Cont.
2910                          * Type 1 IOCB.
2911                          */
2912                         cont_pkt = qla2x00_prep_cont_type1_iocb(
2913                             vha, ha->req_q_map[0]);
2914                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2915                         avail_dsds = 5;
2916                         entry_count++;
2917                 }
2918
2919                 sle_dma = sg_dma_address(sg);
2920                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2921                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2922                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2923                 avail_dsds--;
2924         }
2925
2926         index = 0;
2927
2928         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2929                 dma_addr_t       sle_dma;
2930
2931                 /* Allocate additional continuation packets? */
2932                 if (avail_dsds == 0) {
2933                         /*
2934                          * Five DSDs are available in the Cont.
2935                          * Type 1 IOCB.
2936                          */
2937                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2938                             ha->req_q_map[0]);
2939                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2940                         avail_dsds = 5;
2941                         entry_count++;
2942                 }
2943
2944                 sle_dma = sg_dma_address(sg);
2945                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2946                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2947                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2948                 avail_dsds--;
2949         }
2950         ct_iocb->entry_count = entry_count;
2951 }
2952
2953 /**
2954  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2955  * @sp: command to send to the ISP
2956  *
2957  * Returns non-zero if a failure occurred, else zero.
2958  */
2959 int
2960 qla82xx_start_scsi(srb_t *sp)
2961 {
2962         int             nseg;
2963         unsigned long   flags;
2964         struct scsi_cmnd *cmd;
2965         uint32_t        *clr_ptr;
2966         uint32_t        index;
2967         uint32_t        handle;
2968         uint16_t        cnt;
2969         uint16_t        req_cnt;
2970         uint16_t        tot_dsds;
2971         struct device_reg_82xx __iomem *reg;
2972         uint32_t dbval;
2973         uint32_t *fcp_dl;
2974         uint8_t additional_cdb_len;
2975         struct ct6_dsd *ctx;
2976         struct scsi_qla_host *vha = sp->vha;
2977         struct qla_hw_data *ha = vha->hw;
2978         struct req_que *req = NULL;
2979         struct rsp_que *rsp = NULL;
2980
2981         /* Setup device pointers. */
2982         reg = &ha->iobase->isp82;
2983         cmd = GET_CMD_SP(sp);
2984         req = vha->req;
2985         rsp = ha->rsp_q_map[0];
2986
2987         /* So we know we haven't pci_map'ed anything yet */
2988         tot_dsds = 0;
2989
2990         dbval = 0x04 | (ha->portnum << 5);
2991
2992         /* Send marker if required */
2993         if (vha->marker_needed != 0) {
2994                 if (qla2x00_marker(vha, req,
2995                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2996                         ql_log(ql_log_warn, vha, 0x300c,
2997                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2998                         return QLA_FUNCTION_FAILED;
2999                 }
3000                 vha->marker_needed = 0;
3001         }
3002
3003         /* Acquire ring specific lock */
3004         spin_lock_irqsave(&ha->hardware_lock, flags);
3005
3006         /* Check for room in outstanding command list. */
3007         handle = req->current_outstanding_cmd;
3008         for (index = 1; index < req->num_outstanding_cmds; index++) {
3009                 handle++;
3010                 if (handle == req->num_outstanding_cmds)
3011                         handle = 1;
3012                 if (!req->outstanding_cmds[handle])
3013                         break;
3014         }
3015         if (index == req->num_outstanding_cmds)
3016                 goto queuing_error;
3017
3018         /* Map the sg table so we have an accurate count of sg entries needed */
3019         if (scsi_sg_count(cmd)) {
3020                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3021                     scsi_sg_count(cmd), cmd->sc_data_direction);
3022                 if (unlikely(!nseg))
3023                         goto queuing_error;
3024         } else
3025                 nseg = 0;
3026
3027         tot_dsds = nseg;
3028
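             /*
              * Commands whose DSD count exceeds ql2xshiftctondsd are sent as
              * Command Type 6 IOCBs, which keep the FCP_CMND IU and the DSD
              * lists in host memory; smaller commands use Command Type 7
              * IOCBs with inline and continuation DSDs.
              */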
3029         if (tot_dsds > ql2xshiftctondsd) {
3030                 struct cmd_type_6 *cmd_pkt;
3031                 uint16_t more_dsd_lists = 0;
3032                 struct dsd_dma *dsd_ptr;
3033                 uint16_t i;
3034
3035                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3036                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3037                         ql_dbg(ql_dbg_io, vha, 0x300d,
3038                             "Num of DSD list %d is more than %d for cmd=%p.\n",
3039                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3040                             cmd);
3041                         goto queuing_error;
3042                 }
3043
3044                 if (more_dsd_lists <= ha->gbl_dsd_avail)
3045                         goto sufficient_dsds;
3046                 else
3047                         more_dsd_lists -= ha->gbl_dsd_avail;
3048
3049                 for (i = 0; i < more_dsd_lists; i++) {
3050                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3051                         if (!dsd_ptr) {
3052                                 ql_log(ql_log_fatal, vha, 0x300e,
3053                                     "Failed to allocate memory for dsd_dma "
3054                                     "for cmd=%p.\n", cmd);
3055                                 goto queuing_error;
3056                         }
3057
3058                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3059                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3060                         if (!dsd_ptr->dsd_addr) {
3061                                 kfree(dsd_ptr);
3062                                 ql_log(ql_log_fatal, vha, 0x300f,
3063                                     "Failed to allocate memory for dsd_addr "
3064                                     "for cmd=%p.\n", cmd);
3065                                 goto queuing_error;
3066                         }
3067                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3068                         ha->gbl_dsd_avail++;
3069                 }
3070
3071 sufficient_dsds:
3072                 req_cnt = 1;
3073
3074                 if (req->cnt < (req_cnt + 2)) {
3075                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3076                                 &reg->req_q_out[0]);
3077                         if (req->ring_index < cnt)
3078                                 req->cnt = cnt - req->ring_index;
3079                         else
3080                                 req->cnt = req->length -
3081                                         (req->ring_index - cnt);
3082                         if (req->cnt < (req_cnt + 2))
3083                                 goto queuing_error;
3084                 }
3085
3086                 ctx = sp->u.scmd.ctx =
3087                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3088                 if (!ctx) {
3089                         ql_log(ql_log_fatal, vha, 0x3010,
3090                             "Failed to allocate ctx for cmd=%p.\n", cmd);
3091                         goto queuing_error;
3092                 }
3093
3094                 memset(ctx, 0, sizeof(struct ct6_dsd));
3095                 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3096                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3097                 if (!ctx->fcp_cmnd) {
3098                         ql_log(ql_log_fatal, vha, 0x3011,
3099                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3100                         goto queuing_error;
3101                 }
3102
3103                 /* Initialize the DSD list and dma handle */
3104                 INIT_LIST_HEAD(&ctx->dsd_list);
3105                 ctx->dsd_use_cnt = 0;
3106
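                     /*
                      * The FCP_CMND IU is 8 bytes of LUN plus 4 control
                      * bytes, followed by the CDB and the 4-byte FCP_DL
                      * field: fcp_cmnd_len = 12 + CDB length + 4.
                      */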
3107                 if (cmd->cmd_len > 16) {
3108                         additional_cdb_len = cmd->cmd_len - 16;
3109                         if ((cmd->cmd_len % 4) != 0) {
3110                                 /* SCSI commands longer than 16 bytes
3111                                  * must be a multiple of 4 bytes.
3112                                  */
3113                                 ql_log(ql_log_warn, vha, 0x3012,
3114                                     "scsi cmd len %d not multiple of 4 "
3115                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3116                                 goto queuing_error_fcp_cmnd;
3117                         }
3118                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3119                 } else {
3120                         additional_cdb_len = 0;
3121                         ctx->fcp_cmnd_len = 12 + 16 + 4;
3122                 }
3123
3124                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3125                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3126
3127                 /* Zero out remaining portion of packet. */
3128                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3129                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3130                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3131                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3132
3133                 /* Set NPORT-ID and LUN number. */
3134                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3135                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3136                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3137                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3138                 cmd_pkt->vp_index = sp->vha->vp_idx;
3139
3140                 /* Build IOCB segments */
3141                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3142                         goto queuing_error_fcp_cmnd;
3143
3144                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3145                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3146
3147                 /* build FCP_CMND IU */
3148                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3149                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3150
3151                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3152                         ctx->fcp_cmnd->additional_cdb_len |= 1;
3153                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3154                         ctx->fcp_cmnd->additional_cdb_len |= 2;
3155
3156                 /* Populate the FCP_PRIO. */
3157                 if (ha->flags.fcp_prio_enabled)
3158                         ctx->fcp_cmnd->task_attribute |=
3159                             sp->fcport->fcp_prio << 3;
3160
3161                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3162
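                     /* FCP_DL (the total data length) sits right after the
                      * CDB and is big-endian on the wire, hence htonl().
                      */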
3163                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3164                     additional_cdb_len);
3165                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3166
3167                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3168                 cmd_pkt->fcp_cmnd_dseg_address[0] =
3169                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3170                 cmd_pkt->fcp_cmnd_dseg_address[1] =
3171                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3172
3173                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3174                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3175                 /* Set total IOCB entry count. */
3176                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3177                 /* Specify response queue number where
3178                  * completion should happen.
3179                  */
3180                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3181         } else {
3182                 struct cmd_type_7 *cmd_pkt;
3183                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3184                 if (req->cnt < (req_cnt + 2)) {
3185                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3186                             &reg->req_q_out[0]);
3187                         if (req->ring_index < cnt)
3188                                 req->cnt = cnt - req->ring_index;
3189                         else
3190                                 req->cnt = req->length -
3191                                         (req->ring_index - cnt);
3192                 }
3193                 if (req->cnt < (req_cnt + 2))
3194                         goto queuing_error;
3195
3196                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3197                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3198
3199                 /* Zero out remaining portion of packet. */
3200                 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3201                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3202                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3203                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3204
3205                 /* Set NPORT-ID and LUN number. */
3206                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3207                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3208                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3209                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3210                 cmd_pkt->vp_index = sp->vha->vp_idx;
3211
3212                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3213                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3214                     sizeof(cmd_pkt->lun));
3215
3216                 /* Populate the FCP_PRIO. */
3217                 if (ha->flags.fcp_prio_enabled)
3218                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3219
3220                 /* Load SCSI command packet. */
3221                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3222                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3223
3224                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3225
3226                 /* Build IOCB segments */
3227                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3228
3229                 /* Set total IOCB entry count. */
3230                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3231                 /* Specify response queue number where
3232                  * completion should happen.
3233                  */
3234                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3235
3236         }
3237         /* Build command packet. */
3238         req->current_outstanding_cmd = handle;
3239         req->outstanding_cmds[handle] = sp;
3240         sp->handle = handle;
3241         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3242         req->cnt -= req_cnt;
3243         wmb();
3244
3245         /* Adjust ring index. */
3246         req->ring_index++;
3247         if (req->ring_index == req->length) {
3248                 req->ring_index = 0;
3249                 req->ring_ptr = req->ring;
3250         } else
3251                 req->ring_ptr++;
3252
3253         sp->flags |= SRB_DMA_VALID;
3254
3255         /* Set chip new ring index. */
3256         /* Doorbell write: direct when ql2xdbwr is set, else write-and-verify. */
3257         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3258         if (ql2xdbwr)
3259                 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3260         else {
3261                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3262                 wmb();
3263                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3264                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3265                         wmb();
3266                 }
3267         }
3268
3269         /* Manage unprocessed RIO/ZIO commands in response queue. */
3270         if (vha->flags.process_response_queue &&
3271             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3272                 qla24xx_process_response_queue(vha, rsp);
3273
3274         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3275         return QLA_SUCCESS;
3276
3277 queuing_error_fcp_cmnd:
3278         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3279 queuing_error:
3280         if (tot_dsds)
3281                 scsi_dma_unmap(cmd);
3282
3283         if (sp->u.scmd.ctx) {
3284                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3285                 sp->u.scmd.ctx = NULL;
3286         }
3287         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3288
3289         return QLA_FUNCTION_FAILED;
3290 }
3291
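     /**
      * qla24xx_abort_iocb() - Build an abort IOCB for an outstanding command.
      * @sp: SRB for the abort request
      * @abt_iocb: abort IOCB packet to populate
      */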
3292 static void
3293 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3294 {
3295         struct srb_iocb *aio = &sp->u.iocb_cmd;
3296         scsi_qla_host_t *vha = sp->vha;
3297         struct req_que *req = sp->qpair->req;
3298
3299         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3300         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3301         abt_iocb->entry_count = 1;
3302         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3303         if (sp->fcport) {
3304                 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3305                 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3306                 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3307                 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3308         }
3309         abt_iocb->handle_to_abort =
3310             cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3311                                     aio->u.abt.cmd_hndl));
3312         abt_iocb->vp_index = vha->vp_idx;
3313         abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3314         /* Order the IOCB writes before the caller posts it. */
3315         wmb();
3316 }
3317
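     /**
      * qla2x00_mb_iocb() - Build a mailbox-command pass-through IOCB.
      * @sp: SRB carrying the outbound mailbox registers
      * @mbx: MBX IOCB packet to populate
      */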
3318 static void
3319 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3320 {
3321         int i, sz;
3322
3323         mbx->entry_type = MBX_IOCB_TYPE;
3324         mbx->handle = sp->handle;
3325         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3326
3327         for (i = 0; i < sz; i++)
3328                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3329 }
3330
3331 static void
3332 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3333 {
3334         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3335         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3336         ct_pkt->handle = sp->handle;
3337 }
3338
3339 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3340         struct nack_to_isp *nack)
3341 {
3342         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3343
3344         nack->entry_type = NOTIFY_ACK_TYPE;
3345         nack->entry_count = 1;
3346         nack->ox_id = ntfy->ox_id;
3347
3348         nack->u.isp24.handle = sp->handle;
3349         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3350         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3351                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3352                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3353         }
3354         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3355         nack->u.isp24.status = ntfy->u.isp24.status;
3356         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3357         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3358         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3359         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3360         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3361         nack->u.isp24.srr_flags = 0;
3362         nack->u.isp24.srr_reject_code = 0;
3363         nack->u.isp24.srr_reject_code_expl = 0;
3364         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3365 }
3366
3367 /*
3368  * Build NVME LS request
3369  */
3370 static int
3371 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3372 {
3373         struct srb_iocb *nvme;
3374         int     rval = QLA_SUCCESS;
3375
3376         nvme = &sp->u.iocb_cmd;
3377         cmd_pkt->entry_type = PT_LS4_REQUEST;
3378         cmd_pkt->entry_count = 1;
3379         cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3380
3381         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3382         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3383         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3384
3385         cmd_pkt->tx_dseg_count = 1;
3386         cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3387         cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3388         cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3389         cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3390
3391         cmd_pkt->rx_dseg_count = 1;
3392         cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3393         cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
3394         cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3395         cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3396
3397         return rval;
3398 }
3399
3400 static void
3401 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3402 {
3403         int map, pos;
3404
3405         vce->entry_type = VP_CTRL_IOCB_TYPE;
3406         vce->handle = sp->handle;
3407         vce->entry_count = 1;
3408         vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3409         vce->vp_count = cpu_to_le16(1);
3410
3411         /*
3412          * The index map in firmware starts with 1; decrement the index.
3413          * This is OK as we never use index 0.
3414          */
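             /* For example, vp_index 9 gives map = (9 - 1) / 8 = 1 and
              * pos = (9 - 1) & 7 = 0, i.e. bit 0 of vp_idx_map[1].
              */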
3415         map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3416         pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3417         vce->vp_idx_map[map] |= 1 << pos;
3418 }
3419
3420 static void
3421 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3422 {
3423         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3424         logio->control_flags =
3425             cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3426
3427         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3428         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3429         logio->port_id[1] = sp->fcport->d_id.b.area;
3430         logio->port_id[2] = sp->fcport->d_id.b.domain;
3431         logio->vp_index = sp->fcport->vha->vp_idx;
3432 }
3433
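     /**
      * qla2x00_start_sp() - Allocate an IOCB, build it for the SRB type,
      * and post it to the firmware.
      * @sp: command to send
      *
      * Returns QLA_SUCCESS if the IOCB was queued, else QLA_FUNCTION_FAILED.
      */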
3434 int
3435 qla2x00_start_sp(srb_t *sp)
3436 {
3437         int rval;
3438         scsi_qla_host_t *vha = sp->vha;
3439         struct qla_hw_data *ha = vha->hw;
3440         struct qla_qpair *qp = sp->qpair;
3441         void *pkt;
3442         unsigned long flags;
3443
3444         rval = QLA_FUNCTION_FAILED;
3445         spin_lock_irqsave(qp->qp_lock_ptr, flags);
3446         pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3447         if (!pkt) {
3448                 ql_log(ql_log_warn, vha, 0x700c,
3449                     "qla2x00_alloc_iocbs failed.\n");
3450                 goto done;
3451         }
3452
3453         rval = QLA_SUCCESS;
3454         switch (sp->type) {
3455         case SRB_LOGIN_CMD:
3456                 IS_FWI2_CAPABLE(ha) ?
3457                     qla24xx_login_iocb(sp, pkt) :
3458                     qla2x00_login_iocb(sp, pkt);
3459                 break;
3460         case SRB_PRLI_CMD:
3461                 qla24xx_prli_iocb(sp, pkt);
3462                 break;
3463         case SRB_LOGOUT_CMD:
3464                 IS_FWI2_CAPABLE(ha) ?
3465                     qla24xx_logout_iocb(sp, pkt) :
3466                     qla2x00_logout_iocb(sp, pkt);
3467                 break;
3468         case SRB_ELS_CMD_RPT:
3469         case SRB_ELS_CMD_HST:
3470                 qla24xx_els_iocb(sp, pkt);
3471                 break;
3472         case SRB_CT_CMD:
3473                 IS_FWI2_CAPABLE(ha) ?
3474                     qla24xx_ct_iocb(sp, pkt) :
3475                     qla2x00_ct_iocb(sp, pkt);
3476                 break;
3477         case SRB_ADISC_CMD:
3478                 IS_FWI2_CAPABLE(ha) ?
3479                     qla24xx_adisc_iocb(sp, pkt) :
3480                     qla2x00_adisc_iocb(sp, pkt);
3481                 break;
3482         case SRB_TM_CMD:
3483                 IS_QLAFX00(ha) ?
3484                     qlafx00_tm_iocb(sp, pkt) :
3485                     qla24xx_tm_iocb(sp, pkt);
3486                 break;
3487         case SRB_FXIOCB_DCMD:
3488         case SRB_FXIOCB_BCMD:
3489                 qlafx00_fxdisc_iocb(sp, pkt);
3490                 break;
3491         case SRB_NVME_LS:
3492                 qla_nvme_ls(sp, pkt);
3493                 break;
3494         case SRB_ABT_CMD:
3495                 IS_QLAFX00(ha) ?
3496                         qlafx00_abort_iocb(sp, pkt) :
3497                         qla24xx_abort_iocb(sp, pkt);
3498                 break;
3499         case SRB_ELS_DCMD:
3500                 qla24xx_els_logo_iocb(sp, pkt);
3501                 break;
3502         case SRB_CT_PTHRU_CMD:
3503                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3504                 break;
3505         case SRB_MB_IOCB:
3506                 qla2x00_mb_iocb(sp, pkt);
3507                 break;
3508         case SRB_NACK_PLOGI:
3509         case SRB_NACK_PRLI:
3510         case SRB_NACK_LOGO:
3511                 qla2x00_send_notify_ack_iocb(sp, pkt);
3512                 break;
3513         case SRB_CTRL_VP:
3514                 qla25xx_ctrlvp_iocb(sp, pkt);
3515                 break;
3516         case SRB_PRLO_CMD:
3517                 qla24xx_prlo_iocb(sp, pkt);
3518                 break;
3519         default:
3520                 break;
3521         }
3522
3523         wmb();
3524         qla2x00_start_iocbs(vha, qp->req);
3525 done:
3526         spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3527         return rval;
3528 }
3529
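     /**
      * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB.
      * @sp: SRB carrying the bsg_job
      * @vha: HA context
      * @cmd_pkt: bidirectional command IOCB to populate
      * @tot_dsds: total number of data segment descriptors
      */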
3530 static void
3531 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3532                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3533 {
3534         uint16_t avail_dsds;
3535         uint32_t *cur_dsd;
3536         uint32_t req_data_len = 0;
3537         uint32_t rsp_data_len = 0;
3538         struct scatterlist *sg;
3539         int index;
3540         int entry_count = 1;
3541         struct bsg_job *bsg_job = sp->u.bsg_job;
3542
3543         /* Update entry type to indicate a bidirectional command. */
3544         *((uint32_t *)(&cmd_pkt->entry_type)) =
3545                 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3546
3547         /* Set the transfer direction; in this case set both flags.
3548          * Also set the BD_WRAP_BACK flag; the firmware takes care of
3549          * assigning DID=SID for outgoing pkts.
3550          */
3551         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3552         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3553         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3554                                                         BD_WRAP_BACK);
3555
3556         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3557         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3558         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3559         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3560
3561         vha->bidi_stats.transfer_bytes += req_data_len;
3562         vha->bidi_stats.io_count++;
3563
3564         vha->qla_stats.output_bytes += req_data_len;
3565         vha->qla_stats.output_requests++;
3566
3567         /* Only one DSD is available in the bidirectional IOCB; the
3568          * remaining DSDs are bundled into continuation IOCBs.
3569          */
3570         avail_dsds = 1;
3571         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3572
3573         index = 0;
3574
3575         for_each_sg(bsg_job->request_payload.sg_list, sg,
3576                                 bsg_job->request_payload.sg_cnt, index) {
3577                 dma_addr_t sle_dma;
3578                 cont_a64_entry_t *cont_pkt;
3579
3580                 /* Allocate additional continuation packets */
3581                 if (avail_dsds == 0) {
3582                         /* A Continuation Type 1 IOCB can accommodate
3583                          * 5 DSDs.
3584                          */
3585                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3586                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3587                         avail_dsds = 5;
3588                         entry_count++;
3589                 }
3590                 sle_dma = sg_dma_address(sg);
3591                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3592                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3593                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3594                 avail_dsds--;
3595         }
3596         /* The read-request DSDs always go to a continuation IOCB,
3597          * following the write DSDs. If there is room on the current
3598          * IOCB they are added to it; otherwise a new continuation IOCB
3599          * is allocated.
3600          */
3601         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3602                                 bsg_job->reply_payload.sg_cnt, index) {
3603                 dma_addr_t sle_dma;
3604                 cont_a64_entry_t *cont_pkt;
3605
3606                 /* Allocate additional continuation packets */
3607                 if (avail_dsds == 0) {
3608                         /* A Continuation Type 1 IOCB can accommodate
3609                          * 5 DSDs.
3610                          */
3611                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3612                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3613                         avail_dsds = 5;
3614                         entry_count++;
3615                 }
3616                 sle_dma = sg_dma_address(sg);
3617                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3618                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3619                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3620                 avail_dsds--;
3621         }
3622         /* This value should equal the number of IOCBs required for this command. */
3623         cmd_pkt->entry_count = entry_count;
3624 }
3625
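     /**
      * qla2x00_start_bidir() - Post a bidirectional command to the ISP.
      * @sp: command to send
      * @vha: HA context
      * @tot_dsds: total number of data segment descriptors
      *
      * Returns EXT_STATUS_OK on success, else an EXT_STATUS_* error code.
      */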
3626 int
3627 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3628 {
3630         struct qla_hw_data *ha = vha->hw;
3631         unsigned long flags;
3632         uint32_t handle;
3633         uint32_t index;
3634         uint16_t req_cnt;
3635         uint16_t cnt;
3636         uint32_t *clr_ptr;
3637         struct cmd_bidir *cmd_pkt = NULL;
3638         struct rsp_que *rsp;
3639         struct req_que *req;
3640         int rval = EXT_STATUS_OK;
3641
3644         rsp = ha->rsp_q_map[0];
3645         req = vha->req;
3646
3647         /* Send marker if required */
3648         if (vha->marker_needed != 0) {
3649                 if (qla2x00_marker(vha, req,
3650                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3651                         return EXT_STATUS_MAILBOX;
3652                 vha->marker_needed = 0;
3653         }
3654
3655         /* Acquire ring specific lock */
3656         spin_lock_irqsave(&ha->hardware_lock, flags);
3657
3658         /* Check for room in outstanding command list. */
3659         handle = req->current_outstanding_cmd;
3660         for (index = 1; index < req->num_outstanding_cmds; index++) {
3661                 handle++;
3662                 if (handle == req->num_outstanding_cmds)
3663                         handle = 1;
3664                 if (!req->outstanding_cmds[handle])
3665                         break;
3666         }
3667
3668         if (index == req->num_outstanding_cmds) {
3669                 rval = EXT_STATUS_BUSY;
3670                 goto queuing_error;
3671         }
3672
3673         /* Calculate the number of IOCBs required. */
3674         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3675
3676         /* Check for room on request queue. */
3677         if (req->cnt < req_cnt + 2) {
3678                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3679                     RD_REG_DWORD_RELAXED(req->req_q_out);
3680                 if  (req->ring_index < cnt)
3681                         req->cnt = cnt - req->ring_index;
3682                 else
3683                         req->cnt = req->length -
3684                                 (req->ring_index - cnt);
3685         }
3686         if (req->cnt < req_cnt + 2) {
3687                 rval = EXT_STATUS_BUSY;
3688                 goto queuing_error;
3689         }
3690
3691         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3692         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3693
3694         /* Zero out remaining portion of packet. */
3695         /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3696         clr_ptr = (uint32_t *)cmd_pkt + 2;
3697         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3698
3699         /* Set NPORT-ID (of vha). */
3700         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3701         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3702         cmd_pkt->port_id[1] = vha->d_id.b.area;
3703         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3704
3705         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3706         cmd_pkt->entry_status = (uint8_t) rsp->id;
3707         /* Build command packet. */
3708         req->current_outstanding_cmd = handle;
3709         req->outstanding_cmds[handle] = sp;
3710         sp->handle = handle;
3711         req->cnt -= req_cnt;
3712
3713         /* Send the command to the firmware */
3714         wmb();
3715         qla2x00_start_iocbs(vha, req);
3716 queuing_error:
3717         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3718         return rval;
3719 }