]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/scsi/lpfc/lpfc_scsi.c
2e73331cf53a02d12d408b258fb9d1675df83300
[linux.git] / drivers / scsi / lpfc / lpfc_scsi.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <net/checksum.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_eh.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_tcq.h>
38 #include <scsi/scsi_transport_fc.h>
39
40 #include "lpfc_version.h"
41 #include "lpfc_hw4.h"
42 #include "lpfc_hw.h"
43 #include "lpfc_sli.h"
44 #include "lpfc_sli4.h"
45 #include "lpfc_nl.h"
46 #include "lpfc_disc.h"
47 #include "lpfc.h"
48 #include "lpfc_scsi.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_crtn.h"
51 #include "lpfc_vport.h"
52
/* Seconds to wait for reset / abort completion (used by the EH handlers) */
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/* Gate for the one-shot BlockGuard debug dump; presumably cleared elsewhere
 * before a dump and set back once it completes — TODO confirm at the writers.
 */
int _dump_buf_done = 1;

/* Human-readable names for SCSI protection operations; index order is
 * assumed to match the SCSI_PROT_* op values — verify against scsi.h.
 */
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

/* T10-DIF 8-byte protection information tuple as carried on the wire
 * (big-endian fields).
 */
struct scsi_dif_tuple {
	__be16 guard_tag;       /* Checksum */
	__be16 app_tag;         /* Opaque storage */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};
73
74 static struct lpfc_rport_data *
75 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
76 {
77         struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
78
79         if (vport->phba->cfg_fof)
80                 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
81         else
82                 return (struct lpfc_rport_data *)sdev->hostdata;
83 }
84
/* Forward declarations for routines referenced before their definitions */
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
92 static void
93 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
94 {
95         void *src, *dst;
96         struct scatterlist *sgde = scsi_sglist(cmnd);
97
98         if (!_dump_buf_data) {
99                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
100                         "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
101                                 __func__);
102                 return;
103         }
104
105
106         if (!sgde) {
107                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
108                         "9051 BLKGRD: ERROR: data scatterlist is null\n");
109                 return;
110         }
111
112         dst = (void *) _dump_buf_data;
113         while (sgde) {
114                 src = sg_virt(sgde);
115                 memcpy(dst, src, sgde->length);
116                 dst += sgde->length;
117                 sgde = sg_next(sgde);
118         }
119 }
120
121 static void
122 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
123 {
124         void *src, *dst;
125         struct scatterlist *sgde = scsi_prot_sglist(cmnd);
126
127         if (!_dump_buf_dif) {
128                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
129                         "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
130                                 __func__);
131                 return;
132         }
133
134         if (!sgde) {
135                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
136                         "9053 BLKGRD: ERROR: prot scatterlist is null\n");
137                 return;
138         }
139
140         dst = _dump_buf_dif;
141         while (sgde) {
142                 src = sg_virt(sgde);
143                 memcpy(dst, src, sgde->length);
144                 dst += sgde->length;
145                 sgde = sg_next(sgde);
146         }
147 }
148
/* lpfc_cmd_blksize - Return the logical block (sector) size, in bytes,
 * of the device the command is addressed to.
 */
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
154
/* Flags selecting which protection check is being queried */
#define LPFC_CHECK_PROTECT_GUARD        1
#define LPFC_CHECK_PROTECT_REF          2
/* lpfc_cmd_protect - Report whether the given protection check is enabled.
 *
 * Currently hard-wired to 1: both guard and reference checking are always
 * reported as enabled; @sc and @flag are ignored.
 */
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
162
163 static inline unsigned
164 lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
165 {
166         if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
167                 return 0;
168         if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
169                 return 1;
170         return 0;
171 }
172
173 /**
174  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
175  * @phba: Pointer to HBA object.
176  * @lpfc_cmd: lpfc scsi command object pointer.
177  *
178  * This function is called from the lpfc_prep_task_mgmt_cmd function to
179  * set the last bit in the response sge entry.
180  **/
181 static void
182 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
183                                 struct lpfc_scsi_buf *lpfc_cmd)
184 {
185         struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
186         if (sgl) {
187                 sgl += 1;
188                 sgl->word2 = le32_to_cpu(sgl->word2);
189                 bf_set(lpfc_sli4_sge_last, sgl, 1);
190                 sgl->word2 = cpu_to_le32(sgl->word2);
191         }
192 }
193
/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 * Only successful commands (cmd->result == 0) are counted, and only
 * while statistics collection is enabled and not blocked on the vport.
 * The command latency is placed into either a linear bucket or a
 * power-of-two-width bucket depending on phba->bucket_type.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	/* Elapsed time since the command was started, in milliseconds */
	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	/* host_lock protects the node's lat_data table against teardown */
	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		/* Round up into equal-width buckets of bucket_step ms */
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		/* Exponential buckets: width doubles each step; a latency
		 * beyond every threshold falls into the final bucket.
		 */
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
249
/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process WORKER_RAM_DOWN_EVENT event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	/* hbalock protects last_rsrc_error_time / last_ramp_down_time */
	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	/* Rate-limit: do nothing if a ramp-down was posted within the
	 * last QUEUE_RAMP_DOWN_INTERVAL jiffies.
	 */
	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Post the event once; remember whether it was already pending
	 * so the worker is only woken for a newly-posted event.
	 */
	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
292
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to  process WORKER_RAMP_DOWN_QUEUE event for worker
 * thread.This routine reduces queue depth for all scsi device on each vport
 * associated with @phba.  The new depth is scaled by the ratio of resource
 * errors to total (error + success) commands seen since the last ramp-down,
 * and both counters are reset afterwards.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Reduce depth proportionally to the error
				 * rate; a zero reduction still drops the
				 * depth by at least one.
				 */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Start a fresh measurement window */
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
342
/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks vport list and set each SCSI host to block state
 * by invoking fc_remote_port_delete() routine. This function is invoked
 * with EEH when device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Deleting the remote port blocks further
				 * I/O to every device reached through it.
				 */
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
371
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	/* Space left in the DMA buffer for the BPL after cmnd + rsp */
	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}


		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			/* No iotag available: undo both allocations and
			 * stop; the partial count is still returned.
			 */
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		/* Lay out cmnd, rsp and BPL consecutively in the DMA buffer */
		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			/* Non-SLI3 (or BlockGuard enabled): point the IOCB
			 * at the two-entry BPL built above.
			 */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1  = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
515
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.  It clears the rdata pointer of every aborted
 * scsi buffer whose node belongs to @vport so the node can be freed.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	/* hbalock then the abts list lock, matching the abort fast path */
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
543
/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.  The aborted buffer is looked up first on the aborted
 * scsi buffer list, then (if not found there) by scanning the iotag table
 * for an outstanding FCP iocb with the matching XRI.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			/* Found on the aborted list: mark the exchange
			 * no longer busy and release the buffer.  Note
			 * both locks are dropped before releasing.
			 */
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				/* Hold the XRI out of reuse (RRQ) and run
				 * the abort error handler for the node.
				 */
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	/* Not on the aborted list: search outstanding FCP iocbs by iotag */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Kick the worker if the ELS txq has pending work */
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
615
616 /**
617  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
618  * @phba: The HBA for which this call is being executed.
619  *
620  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
621  * and returns to caller.
622  *
623  * Return codes:
624  *   NULL - Error
625  *   Pointer to lpfc_scsi_buf - Success
626  **/
627 static struct lpfc_scsi_buf*
628 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
629 {
630         struct  lpfc_scsi_buf * lpfc_cmd = NULL;
631         struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
632         unsigned long iflag = 0;
633
634         spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
635         list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
636                          list);
637         if (!lpfc_cmd) {
638                 spin_lock(&phba->scsi_buf_list_put_lock);
639                 list_splice(&phba->lpfc_scsi_buf_list_put,
640                             &phba->lpfc_scsi_buf_list_get);
641                 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
642                 list_remove_head(scsi_buf_list_get, lpfc_cmd,
643                                  struct lpfc_scsi_buf, list);
644                 spin_unlock(&phba->scsi_buf_list_put_lock);
645         }
646         spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
647
648         if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
649                 atomic_inc(&ndlp->cmd_pending);
650                 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
651         }
652         return  lpfc_cmd;
653 }
654 /**
655  * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_common_buf_list of the HBA
656  * @phba: The HBA for which this call is being executed.
657  *
658  * This routine removes a scsi buffer from head of @phba lpfc_common_buf_list
659  * and returns to caller.
660  *
661  * Return codes:
662  *   NULL - Error
663  *   Pointer to lpfc_scsi_buf - Success
664  **/
665 static struct lpfc_scsi_buf*
666 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
667 {
668         struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
669         unsigned long iflag = 0;
670         struct sli4_sge *sgl;
671         IOCB_t *iocb;
672         dma_addr_t pdma_phys_fcp_rsp;
673         dma_addr_t pdma_phys_fcp_cmd;
674         uint32_t sgl_size;
675         int found = 0;
676
677         spin_lock_irqsave(&phba->common_buf_list_get_lock, iflag);
678         list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
679                                  &phba->lpfc_common_buf_list_get, list) {
680                 if (lpfc_test_rrq_active(phba, ndlp,
681                                          lpfc_cmd->cur_iocbq.sli4_lxritag))
682                         continue;
683                 list_del_init(&lpfc_cmd->list);
684                 phba->get_common_bufs--;
685                 found = 1;
686                 break;
687         }
688         if (!found) {
689                 spin_lock(&phba->common_buf_list_put_lock);
690                 list_splice(&phba->lpfc_common_buf_list_put,
691                             &phba->lpfc_common_buf_list_get);
692                 phba->get_common_bufs += phba->put_common_bufs;
693                 INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put);
694                 phba->put_common_bufs = 0;
695                 spin_unlock(&phba->common_buf_list_put_lock);
696                 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
697                                          &phba->lpfc_common_buf_list_get,
698                                          list) {
699                         if (lpfc_test_rrq_active(
700                                 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
701                                 continue;
702                         list_del_init(&lpfc_cmd->list);
703                         phba->get_common_bufs--;
704                         found = 1;
705                         break;
706                 }
707         }
708         spin_unlock_irqrestore(&phba->common_buf_list_get_lock, iflag);
709         if (!found)
710                 return NULL;
711
712         sgl_size = phba->cfg_sg_dma_buf_size -
713                 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
714
715         /* Setup key fields in buffer that may have been changed
716          * if other protocols used this buffer.
717          */
718         lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
719         lpfc_cmd->prot_seg_cnt = 0;
720         lpfc_cmd->seg_cnt = 0;
721         lpfc_cmd->waitq = NULL;
722         lpfc_cmd->timeout = 0;
723         lpfc_cmd->flags = 0;
724         lpfc_cmd->start_time = jiffies;
725         lpfc_cmd->waitq = NULL;
726         lpfc_cmd->cpu = smp_processor_id();
727 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
728         lpfc_cmd->prot_data_type = 0;
729 #endif
730
731         lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
732         lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
733                                 sizeof(struct fcp_cmnd));
734
735         /*
736          * The first two SGEs are the FCP_CMD and FCP_RSP.
737          * The balance are sg list bdes. Initialize the
738          * first two and leave the rest for queuecommand.
739          */
740         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
741         pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size);
742         sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
743         sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
744         sgl->word2 = le32_to_cpu(sgl->word2);
745         bf_set(lpfc_sli4_sge_last, sgl, 0);
746         sgl->word2 = cpu_to_le32(sgl->word2);
747         sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
748         sgl++;
749
750         /* Setup the physical region for the FCP RSP */
751         pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
752         sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
753         sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
754         sgl->word2 = le32_to_cpu(sgl->word2);
755         bf_set(lpfc_sli4_sge_last, sgl, 1);
756         sgl->word2 = cpu_to_le32(sgl->word2);
757         sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
758
759         /*
760          * Since the IOCB for the FCP I/O is built into this
761          * lpfc_scsi_buf, initialize it with all known data now.
762          */
763         iocb = &lpfc_cmd->cur_iocbq.iocb;
764         iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
765         iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
766         /* setting the BLP size to 2 * sizeof BDE may not be correct.
767          * We are setting the bpl to point to out sgl. An sgl's
768          * entries are 16 bytes, a bpl entries are 12 bytes.
769          */
770         iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
771         iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
772         iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
773         iocb->ulpBdeCount = 1;
774         iocb->ulpLe = 1;
775         iocb->ulpClass = CLASS3;
776
777         if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
778                 atomic_inc(&ndlp->cmd_pending);
779                 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
780         }
781         return  lpfc_cmd;
782 }
783 /**
784  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
785  * @phba: The HBA for which this call is being executed.
786  *
787  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
788  * and returns to caller.
789  *
790  * Return codes:
791  *   NULL - Error
792  *   Pointer to lpfc_scsi_buf - Success
793  **/
794 static struct lpfc_scsi_buf*
795 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
796 {
797         return  phba->lpfc_get_scsi_buf(phba, ndlp);
798 }
799
800 /**
801  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
802  * @phba: The Hba for which this call is being executed.
803  * @psb: The scsi buffer which is being released.
804  *
805  * This routine releases @psb scsi buffer by adding it to tail of @phba
806  * lpfc_scsi_buf_list list.
807  **/
808 static void
809 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
810 {
811         unsigned long iflag = 0;
812
813         psb->seg_cnt = 0;
814         psb->prot_seg_cnt = 0;
815
816         spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
817         psb->pCmd = NULL;
818         psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
819         list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
820         spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
821 }
822
823 /**
824  * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
825  * @phba: The Hba for which this call is being executed.
826  * @psb: The scsi buffer which is being released.
827  *
828  * This routine releases @psb scsi buffer by adding it to tail of @phba
829  * lpfc_common_buf_list list. For SLI4 XRI's are tied to the scsi buffer
830  * and cannot be reused for at least RA_TOV amount of time if it was
831  * aborted.
832  **/
833 static void
834 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
835 {
836         unsigned long iflag = 0;
837
838         psb->seg_cnt = 0;
839         psb->prot_seg_cnt = 0;
840
841         if (psb->exch_busy) {
842                 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
843                                         iflag);
844                 psb->pCmd = NULL;
845                 list_add_tail(&psb->list,
846                         &phba->sli4_hba.lpfc_abts_scsi_buf_list);
847                 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
848                                         iflag);
849         } else {
850                 /* MUST zero fields if buffer is reused by another protocol */
851                 psb->pCmd = NULL;
852                 psb->cur_iocbq.iocb_cmpl = NULL;
853                 spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
854                 list_add_tail(&psb->list, &phba->lpfc_common_buf_list_put);
855                 phba->put_common_bufs++;
856                 spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
857         }
858 }
859
860 /**
861  * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
862  * @phba: The Hba for which this call is being executed.
863  * @psb: The scsi buffer which is being released.
864  *
865  * This routine releases @psb scsi buffer by adding it to tail of @phba
866  * lpfc_scsi_buf_list list.
867  **/
868 static void
869 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
870 {
871         if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
872                 atomic_dec(&psb->ndlp->cmd_pending);
873
874         psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
875         phba->lpfc_release_scsi_buf(phba, psb);
876 }
877
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and format the bde. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error (mapping failed or too many segments; nothing left mapped)
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	/* Inline BDE slots inside the extended SLI-3 IOCB itself. */
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			/* Mapping produced more segments than the driver
			 * provisioned for at probe time: undo the DMA
			 * mapping and fail the prep.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				/* Whole I/O fits in the extended IOCB's
				 * inline BDE slots.
				 */
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				/* Append to the external BPL instead. */
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			/* The BPL sits in the same DMA buffer, just past the
			 * fcp_cmnd, fcp_rsp and the two prepopulated BDEs.
			 */
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
1015
1016 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1017
1018 /* Return BG_ERR_INIT if error injection is detected by Initiator */
1019 #define BG_ERR_INIT     0x1
1020 /* Return BG_ERR_TGT if error injection is detected by Target */
1021 #define BG_ERR_TGT      0x2
1022 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
1023 #define BG_ERR_SWAP     0x10
1024 /**
1025  * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
1026  * error injection
1027  **/
1028 #define BG_ERR_CHECK    0x20
1029
1030 /**
1031  * lpfc_bg_err_inject - Determine if we should inject an error
1032  * @phba: The Hba for which this call is being executed.
1033  * @sc: The SCSI command to examine
1034  * @reftag: (out) BlockGuard reference tag for transmitted data
1035  * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
1037  *
1038  * Returns BG_ERR_* bit mask or 0 if request ignored
1039  **/
1040 static int
1041 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1042                 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1043 {
1044         struct scatterlist *sgpe; /* s/g prot entry */
1045         struct lpfc_scsi_buf *lpfc_cmd = NULL;
1046         struct scsi_dif_tuple *src = NULL;
1047         struct lpfc_nodelist *ndlp;
1048         struct lpfc_rport_data *rdata;
1049         uint32_t op = scsi_get_prot_op(sc);
1050         uint32_t blksize;
1051         uint32_t numblks;
1052         sector_t lba;
1053         int rc = 0;
1054         int blockoff = 0;
1055
1056         if (op == SCSI_PROT_NORMAL)
1057                 return 0;
1058
1059         sgpe = scsi_prot_sglist(sc);
1060         lba = scsi_get_lba(sc);
1061
1062         /* First check if we need to match the LBA */
1063         if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1064                 blksize = lpfc_cmd_blksize(sc);
1065                 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1066
1067                 /* Make sure we have the right LBA if one is specified */
1068                 if ((phba->lpfc_injerr_lba < lba) ||
1069                         (phba->lpfc_injerr_lba >= (lba + numblks)))
1070                         return 0;
1071                 if (sgpe) {
1072                         blockoff = phba->lpfc_injerr_lba - lba;
1073                         numblks = sg_dma_len(sgpe) /
1074                                 sizeof(struct scsi_dif_tuple);
1075                         if (numblks < blockoff)
1076                                 blockoff = numblks;
1077                 }
1078         }
1079
1080         /* Next check if we need to match the remote NPortID or WWPN */
1081         rdata = lpfc_rport_data_from_scsi_device(sc->device);
1082         if (rdata && rdata->pnode) {
1083                 ndlp = rdata->pnode;
1084
1085                 /* Make sure we have the right NPortID if one is specified */
1086                 if (phba->lpfc_injerr_nportid  &&
1087                         (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1088                         return 0;
1089
1090                 /*
1091                  * Make sure we have the right WWPN if one is specified.
1092                  * wwn[0] should be a non-zero NAA in a good WWPN.
1093                  */
1094                 if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1095                         (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1096                                 sizeof(struct lpfc_name)) != 0))
1097                         return 0;
1098         }
1099
1100         /* Setup a ptr to the protection data if the SCSI host provides it */
1101         if (sgpe) {
1102                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1103                 src += blockoff;
1104                 lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1105         }
1106
1107         /* Should we change the Reference Tag */
1108         if (reftag) {
1109                 if (phba->lpfc_injerr_wref_cnt) {
1110                         switch (op) {
1111                         case SCSI_PROT_WRITE_PASS:
1112                                 if (src) {
1113                                         /*
1114                                          * For WRITE_PASS, force the error
1115                                          * to be sent on the wire. It should
1116                                          * be detected by the Target.
1117                                          * If blockoff != 0 error will be
1118                                          * inserted in middle of the IO.
1119                                          */
1120
1121                                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1122                                         "9076 BLKGRD: Injecting reftag error: "
1123                                         "write lba x%lx + x%x oldrefTag x%x\n",
1124                                         (unsigned long)lba, blockoff,
1125                                         be32_to_cpu(src->ref_tag));
1126
1127                                         /*
1128                                          * Save the old ref_tag so we can
1129                                          * restore it on completion.
1130                                          */
1131                                         if (lpfc_cmd) {
1132                                                 lpfc_cmd->prot_data_type =
1133                                                         LPFC_INJERR_REFTAG;
1134                                                 lpfc_cmd->prot_data_segment =
1135                                                         src;
1136                                                 lpfc_cmd->prot_data =
1137                                                         src->ref_tag;
1138                                         }
1139                                         src->ref_tag = cpu_to_be32(0xDEADBEEF);
1140                                         phba->lpfc_injerr_wref_cnt--;
1141                                         if (phba->lpfc_injerr_wref_cnt == 0) {
1142                                                 phba->lpfc_injerr_nportid = 0;
1143                                                 phba->lpfc_injerr_lba =
1144                                                         LPFC_INJERR_LBA_OFF;
1145                                                 memset(&phba->lpfc_injerr_wwpn,
1146                                                   0, sizeof(struct lpfc_name));
1147                                         }
1148                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1149
1150                                         break;
1151                                 }
1152                                 /* Drop thru */
1153                         case SCSI_PROT_WRITE_INSERT:
1154                                 /*
1155                                  * For WRITE_INSERT, force the error
1156                                  * to be sent on the wire. It should be
1157                                  * detected by the Target.
1158                                  */
1159                                 /* DEADBEEF will be the reftag on the wire */
1160                                 *reftag = 0xDEADBEEF;
1161                                 phba->lpfc_injerr_wref_cnt--;
1162                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1163                                         phba->lpfc_injerr_nportid = 0;
1164                                         phba->lpfc_injerr_lba =
1165                                         LPFC_INJERR_LBA_OFF;
1166                                         memset(&phba->lpfc_injerr_wwpn,
1167                                                 0, sizeof(struct lpfc_name));
1168                                 }
1169                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1170
1171                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1172                                         "9078 BLKGRD: Injecting reftag error: "
1173                                         "write lba x%lx\n", (unsigned long)lba);
1174                                 break;
1175                         case SCSI_PROT_WRITE_STRIP:
1176                                 /*
1177                                  * For WRITE_STRIP and WRITE_PASS,
1178                                  * force the error on data
1179                                  * being copied from SLI-Host to SLI-Port.
1180                                  */
1181                                 *reftag = 0xDEADBEEF;
1182                                 phba->lpfc_injerr_wref_cnt--;
1183                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1184                                         phba->lpfc_injerr_nportid = 0;
1185                                         phba->lpfc_injerr_lba =
1186                                                 LPFC_INJERR_LBA_OFF;
1187                                         memset(&phba->lpfc_injerr_wwpn,
1188                                                 0, sizeof(struct lpfc_name));
1189                                 }
1190                                 rc = BG_ERR_INIT;
1191
1192                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1193                                         "9077 BLKGRD: Injecting reftag error: "
1194                                         "write lba x%lx\n", (unsigned long)lba);
1195                                 break;
1196                         }
1197                 }
1198                 if (phba->lpfc_injerr_rref_cnt) {
1199                         switch (op) {
1200                         case SCSI_PROT_READ_INSERT:
1201                         case SCSI_PROT_READ_STRIP:
1202                         case SCSI_PROT_READ_PASS:
1203                                 /*
1204                                  * For READ_STRIP and READ_PASS, force the
1205                                  * error on data being read off the wire. It
1206                                  * should force an IO error to the driver.
1207                                  */
1208                                 *reftag = 0xDEADBEEF;
1209                                 phba->lpfc_injerr_rref_cnt--;
1210                                 if (phba->lpfc_injerr_rref_cnt == 0) {
1211                                         phba->lpfc_injerr_nportid = 0;
1212                                         phba->lpfc_injerr_lba =
1213                                                 LPFC_INJERR_LBA_OFF;
1214                                         memset(&phba->lpfc_injerr_wwpn,
1215                                                 0, sizeof(struct lpfc_name));
1216                                 }
1217                                 rc = BG_ERR_INIT;
1218
1219                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1220                                         "9079 BLKGRD: Injecting reftag error: "
1221                                         "read lba x%lx\n", (unsigned long)lba);
1222                                 break;
1223                         }
1224                 }
1225         }
1226
1227         /* Should we change the Application Tag */
1228         if (apptag) {
1229                 if (phba->lpfc_injerr_wapp_cnt) {
1230                         switch (op) {
1231                         case SCSI_PROT_WRITE_PASS:
1232                                 if (src) {
1233                                         /*
1234                                          * For WRITE_PASS, force the error
1235                                          * to be sent on the wire. It should
1236                                          * be detected by the Target.
1237                                          * If blockoff != 0 error will be
1238                                          * inserted in middle of the IO.
1239                                          */
1240
1241                                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1242                                         "9080 BLKGRD: Injecting apptag error: "
1243                                         "write lba x%lx + x%x oldappTag x%x\n",
1244                                         (unsigned long)lba, blockoff,
1245                                         be16_to_cpu(src->app_tag));
1246
1247                                         /*
1248                                          * Save the old app_tag so we can
1249                                          * restore it on completion.
1250                                          */
1251                                         if (lpfc_cmd) {
1252                                                 lpfc_cmd->prot_data_type =
1253                                                         LPFC_INJERR_APPTAG;
1254                                                 lpfc_cmd->prot_data_segment =
1255                                                         src;
1256                                                 lpfc_cmd->prot_data =
1257                                                         src->app_tag;
1258                                         }
1259                                         src->app_tag = cpu_to_be16(0xDEAD);
1260                                         phba->lpfc_injerr_wapp_cnt--;
1261                                         if (phba->lpfc_injerr_wapp_cnt == 0) {
1262                                                 phba->lpfc_injerr_nportid = 0;
1263                                                 phba->lpfc_injerr_lba =
1264                                                         LPFC_INJERR_LBA_OFF;
1265                                                 memset(&phba->lpfc_injerr_wwpn,
1266                                                   0, sizeof(struct lpfc_name));
1267                                         }
1268                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1269                                         break;
1270                                 }
1271                                 /* Drop thru */
1272                         case SCSI_PROT_WRITE_INSERT:
1273                                 /*
1274                                  * For WRITE_INSERT, force the
1275                                  * error to be sent on the wire. It should be
1276                                  * detected by the Target.
1277                                  */
1278                                 /* DEAD will be the apptag on the wire */
1279                                 *apptag = 0xDEAD;
1280                                 phba->lpfc_injerr_wapp_cnt--;
1281                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1282                                         phba->lpfc_injerr_nportid = 0;
1283                                         phba->lpfc_injerr_lba =
1284                                                 LPFC_INJERR_LBA_OFF;
1285                                         memset(&phba->lpfc_injerr_wwpn,
1286                                                 0, sizeof(struct lpfc_name));
1287                                 }
1288                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1289
1290                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1291                                         "0813 BLKGRD: Injecting apptag error: "
1292                                         "write lba x%lx\n", (unsigned long)lba);
1293                                 break;
1294                         case SCSI_PROT_WRITE_STRIP:
1295                                 /*
1296                                  * For WRITE_STRIP and WRITE_PASS,
1297                                  * force the error on data
1298                                  * being copied from SLI-Host to SLI-Port.
1299                                  */
1300                                 *apptag = 0xDEAD;
1301                                 phba->lpfc_injerr_wapp_cnt--;
1302                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1303                                         phba->lpfc_injerr_nportid = 0;
1304                                         phba->lpfc_injerr_lba =
1305                                                 LPFC_INJERR_LBA_OFF;
1306                                         memset(&phba->lpfc_injerr_wwpn,
1307                                                 0, sizeof(struct lpfc_name));
1308                                 }
1309                                 rc = BG_ERR_INIT;
1310
1311                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1312                                         "0812 BLKGRD: Injecting apptag error: "
1313                                         "write lba x%lx\n", (unsigned long)lba);
1314                                 break;
1315                         }
1316                 }
1317                 if (phba->lpfc_injerr_rapp_cnt) {
1318                         switch (op) {
1319                         case SCSI_PROT_READ_INSERT:
1320                         case SCSI_PROT_READ_STRIP:
1321                         case SCSI_PROT_READ_PASS:
1322                                 /*
1323                                  * For READ_STRIP and READ_PASS, force the
1324                                  * error on data being read off the wire. It
1325                                  * should force an IO error to the driver.
1326                                  */
1327                                 *apptag = 0xDEAD;
1328                                 phba->lpfc_injerr_rapp_cnt--;
1329                                 if (phba->lpfc_injerr_rapp_cnt == 0) {
1330                                         phba->lpfc_injerr_nportid = 0;
1331                                         phba->lpfc_injerr_lba =
1332                                                 LPFC_INJERR_LBA_OFF;
1333                                         memset(&phba->lpfc_injerr_wwpn,
1334                                                 0, sizeof(struct lpfc_name));
1335                                 }
1336                                 rc = BG_ERR_INIT;
1337
1338                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1339                                         "0814 BLKGRD: Injecting apptag error: "
1340                                         "read lba x%lx\n", (unsigned long)lba);
1341                                 break;
1342                         }
1343                 }
1344         }
1345
1346
1347         /* Should we change the Guard Tag */
1348         if (new_guard) {
1349                 if (phba->lpfc_injerr_wgrd_cnt) {
1350                         switch (op) {
1351                         case SCSI_PROT_WRITE_PASS:
1352                                 rc = BG_ERR_CHECK;
1353                                 /* Drop thru */
1354
1355                         case SCSI_PROT_WRITE_INSERT:
1356                                 /*
1357                                  * For WRITE_INSERT, force the
1358                                  * error to be sent on the wire. It should be
1359                                  * detected by the Target.
1360                                  */
1361                                 phba->lpfc_injerr_wgrd_cnt--;
1362                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1363                                         phba->lpfc_injerr_nportid = 0;
1364                                         phba->lpfc_injerr_lba =
1365                                                 LPFC_INJERR_LBA_OFF;
1366                                         memset(&phba->lpfc_injerr_wwpn,
1367                                                 0, sizeof(struct lpfc_name));
1368                                 }
1369
1370                                 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1371                                 /* Signals the caller to swap CRC->CSUM */
1372
1373                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1374                                         "0817 BLKGRD: Injecting guard error: "
1375                                         "write lba x%lx\n", (unsigned long)lba);
1376                                 break;
1377                         case SCSI_PROT_WRITE_STRIP:
1378                                 /*
1379                                  * For WRITE_STRIP and WRITE_PASS,
1380                                  * force the error on data
1381                                  * being copied from SLI-Host to SLI-Port.
1382                                  */
1383                                 phba->lpfc_injerr_wgrd_cnt--;
1384                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1385                                         phba->lpfc_injerr_nportid = 0;
1386                                         phba->lpfc_injerr_lba =
1387                                                 LPFC_INJERR_LBA_OFF;
1388                                         memset(&phba->lpfc_injerr_wwpn,
1389                                                 0, sizeof(struct lpfc_name));
1390                                 }
1391
1392                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1393                                 /* Signals the caller to swap CRC->CSUM */
1394
1395                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1396                                         "0816 BLKGRD: Injecting guard error: "
1397                                         "write lba x%lx\n", (unsigned long)lba);
1398                                 break;
1399                         }
1400                 }
1401                 if (phba->lpfc_injerr_rgrd_cnt) {
1402                         switch (op) {
1403                         case SCSI_PROT_READ_INSERT:
1404                         case SCSI_PROT_READ_STRIP:
1405                         case SCSI_PROT_READ_PASS:
1406                                 /*
1407                                  * For READ_STRIP and READ_PASS, force the
1408                                  * error on data being read off the wire. It
1409                                  * should force an IO error to the driver.
1410                                  */
1411                                 phba->lpfc_injerr_rgrd_cnt--;
1412                                 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1413                                         phba->lpfc_injerr_nportid = 0;
1414                                         phba->lpfc_injerr_lba =
1415                                                 LPFC_INJERR_LBA_OFF;
1416                                         memset(&phba->lpfc_injerr_wwpn,
1417                                                 0, sizeof(struct lpfc_name));
1418                                 }
1419
1420                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1421                                 /* Signals the caller to swap CRC->CSUM */
1422
1423                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1424                                         "0818 BLKGRD: Injecting guard error: "
1425                                         "read lba x%lx\n", (unsigned long)lba);
1426                         }
1427                 }
1428         }
1429
1430         return rc;
1431 }
1432 #endif
1433
1434 /**
1435  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1436  * the specified SCSI command.
1437  * @phba: The Hba for which this call is being executed.
1438  * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
1441  *
1442  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1443  *
1444  **/
1445 static int
1446 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1447                 uint8_t *txop, uint8_t *rxop)
1448 {
1449         uint8_t ret = 0;
1450
1451         if (lpfc_cmd_guard_csum(sc)) {
1452                 switch (scsi_get_prot_op(sc)) {
1453                 case SCSI_PROT_READ_INSERT:
1454                 case SCSI_PROT_WRITE_STRIP:
1455                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1456                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1457                         break;
1458
1459                 case SCSI_PROT_READ_STRIP:
1460                 case SCSI_PROT_WRITE_INSERT:
1461                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1462                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1463                         break;
1464
1465                 case SCSI_PROT_READ_PASS:
1466                 case SCSI_PROT_WRITE_PASS:
1467                         *rxop = BG_OP_IN_CRC_OUT_CSUM;
1468                         *txop = BG_OP_IN_CSUM_OUT_CRC;
1469                         break;
1470
1471                 case SCSI_PROT_NORMAL:
1472                 default:
1473                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1474                                 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1475                                         scsi_get_prot_op(sc));
1476                         ret = 1;
1477                         break;
1478
1479                 }
1480         } else {
1481                 switch (scsi_get_prot_op(sc)) {
1482                 case SCSI_PROT_READ_STRIP:
1483                 case SCSI_PROT_WRITE_INSERT:
1484                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1485                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1486                         break;
1487
1488                 case SCSI_PROT_READ_PASS:
1489                 case SCSI_PROT_WRITE_PASS:
1490                         *rxop = BG_OP_IN_CRC_OUT_CRC;
1491                         *txop = BG_OP_IN_CRC_OUT_CRC;
1492                         break;
1493
1494                 case SCSI_PROT_READ_INSERT:
1495                 case SCSI_PROT_WRITE_STRIP:
1496                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1497                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1498                         break;
1499
1500                 case SCSI_PROT_NORMAL:
1501                 default:
1502                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1503                                 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1504                                         scsi_get_prot_op(sc));
1505                         ret = 1;
1506                         break;
1507                 }
1508         }
1509
1510         return ret;
1511 }
1512
1513 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1514 /**
 * lpfc_bg_err_opcodes - Redetermine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
1521  *
1522  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1523  *
1524  **/
1525 static int
1526 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1527                 uint8_t *txop, uint8_t *rxop)
1528 {
1529         uint8_t ret = 0;
1530
1531         if (lpfc_cmd_guard_csum(sc)) {
1532                 switch (scsi_get_prot_op(sc)) {
1533                 case SCSI_PROT_READ_INSERT:
1534                 case SCSI_PROT_WRITE_STRIP:
1535                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1536                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1537                         break;
1538
1539                 case SCSI_PROT_READ_STRIP:
1540                 case SCSI_PROT_WRITE_INSERT:
1541                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1542                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1543                         break;
1544
1545                 case SCSI_PROT_READ_PASS:
1546                 case SCSI_PROT_WRITE_PASS:
1547                         *rxop = BG_OP_IN_CSUM_OUT_CRC;
1548                         *txop = BG_OP_IN_CRC_OUT_CSUM;
1549                         break;
1550
1551                 case SCSI_PROT_NORMAL:
1552                 default:
1553                         break;
1554
1555                 }
1556         } else {
1557                 switch (scsi_get_prot_op(sc)) {
1558                 case SCSI_PROT_READ_STRIP:
1559                 case SCSI_PROT_WRITE_INSERT:
1560                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1561                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1562                         break;
1563
1564                 case SCSI_PROT_READ_PASS:
1565                 case SCSI_PROT_WRITE_PASS:
1566                         *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1567                         *txop = BG_OP_IN_CSUM_OUT_CSUM;
1568                         break;
1569
1570                 case SCSI_PROT_READ_INSERT:
1571                 case SCSI_PROT_WRITE_STRIP:
1572                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1573                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1574                         break;
1575
1576                 case SCSI_PROT_NORMAL:
1577                 default:
1578                         break;
1579                 }
1580         }
1581
1582         return ret;
1583 }
1584 #endif
1585
1586 /**
1587  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1588  * @phba: The Hba for which this call is being executed.
1589  * @sc: pointer to scsi command we're working on
1590  * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
1592  *
1593  * This function sets up BPL buffer list for protection groups of
1594  * type LPFC_PG_TYPE_NO_DIF
1595  *
1596  * This is usually used when the HBA is instructed to generate
1597  * DIFs and insert them into data stream (or strip DIF from
1598  * incoming data stream)
1599  *
1600  * The buffer list consists of just one protection group described
1601  * below:
1602  *                                +-------------------------+
1603  *   start of prot group  -->     |          PDE_5          |
1604  *                                +-------------------------+
1605  *                                |          PDE_6          |
1606  *                                +-------------------------+
1607  *                                |         Data BDE        |
1608  *                                +-------------------------+
1609  *                                |more Data BDE's ... (opt)|
1610  *                                +-------------------------+
1611  *
1612  *
1613  * Note: Data s/g buffers have been dma mapped
1614  *
1615  * Returns the number of BDEs added to the BPL.
1616  **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;	/* written into pde6_ce/pde6_re; injection may clear it */
	uint32_t reftag;
	uint8_t txop, rxop;

	/* Resolve the BlockGuard tx/rx opcodes for this command; failure
	 * means an unsupported op/guard combination, so emit no BDEs.
	 */
	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debug build only: error injection may swap the CRC/CSUM
	 * opcodes or disable tag checking for this IO.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	/* Number of BDEs written (0 when the opcode lookup failed). */
	return num_bde;
}
1717
1718 /**
1719  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1720  * @phba: The Hba for which this call is being executed.
1721  * @sc: pointer to scsi command we're working on
1722  * @bpl: pointer to buffer list for protection groups
1723  * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
1725  *
1726  * This function sets up BPL buffer list for protection groups of
1727  * type LPFC_PG_TYPE_DIF
1728  *
1729  * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
1731  * to place the DIFs in the outgoing stream.  For read operations,
1732  * The HBA could extract the DIFs and place it in DIF buffers.
1733  *
1734  * The buffer list for this type consists of one or more of the
1735  * protection groups described below:
1736  *                                    +-------------------------+
1737  *   start of first prot group  -->   |          PDE_5          |
1738  *                                    +-------------------------+
1739  *                                    |          PDE_6          |
1740  *                                    +-------------------------+
1741  *                                    |      PDE_7 (Prot BDE)   |
1742  *                                    +-------------------------+
1743  *                                    |        Data BDE         |
1744  *                                    +-------------------------+
1745  *                                    |more Data BDE's ... (opt)|
1746  *                                    +-------------------------+
1747  *   start of new  prot group  -->    |          PDE_5          |
1748  *                                    +-------------------------+
1749  *                                    |          ...            |
1750  *                                    +-------------------------+
1751  *
1752  * Note: It is assumed that both data and protection s/g buffers have been
1753  *       mapped for DMA
1754  *
1755  * Returns the number of BDEs added to the BPL.
1756  **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;	/* written into pde6_ce/pde6_re; injection may clear it */
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	/* Both data and protection s/g lists are required for this
	 * protection-group type.
	 */
	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debug build only: error injection may swap the CRC/CSUM
	 * opcodes or disable tag checking for this IO.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* Each pass of this loop emits one protection group:
	 * PDE5 + PDE6 + PDE7 (prot buffer) + the data BDEs it covers.
	 */
	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		/* protgroup_offset is nonzero when resuming a prot buffer
		 * that was split at a 4K boundary on a previous pass.
		 */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		/* 8 bytes of protection data per logical block */
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			/* split_offset resumes a data buffer that straddled
			 * the previous protection group.
			 */
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			/* stay on the same prot s/g entry: its remainder
			 * starts a new protection group next pass
			 */
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	/* Number of BDEs written (0 when the opcode lookup failed). */
	return num_bde;
}
1972
1973 /**
1974  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1975  * @phba: The Hba for which this call is being executed.
1976  * @sc: pointer to scsi command we're working on
1977  * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
1979  *
1980  * This function sets up SGL buffer list for protection groups of
1981  * type LPFC_PG_TYPE_NO_DIF
1982  *
1983  * This is usually used when the HBA is instructed to generate
1984  * DIFs and insert them into data stream (or strip DIF from
1985  * incoming data stream)
1986  *
1987  * The buffer list consists of just one protection group described
1988  * below:
1989  *                                +-------------------------+
1990  *   start of prot group  -->     |         DI_SEED         |
1991  *                                +-------------------------+
1992  *                                |         Data SGE        |
1993  *                                +-------------------------+
1994  *                                |more Data SGE's ... (opt)|
1995  *                                +-------------------------+
1996  *
1997  *
1998  * Note: Data s/g buffers have been dma mapped
1999  *
2000  * Returns the number of SGEs added to the SGL.
2001  **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;	/* written into dif_ce/dif_re; injection may clear it */
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	/* Resolve the BlockGuard tx/rx opcodes for this command; failure
	 * means an unsupported op/guard combination, so emit no SGEs.
	 */
	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debug build only: error injection may swap the CRC/CSUM
	 * opcodes or disable tag checking for this IO.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		/* mark only the final data SGE as last */
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	/* Number of SGEs written (0 when the opcode lookup failed). */
	return num_sge;
}
2099
2100 /**
2101  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2102  * @phba: The Hba for which this call is being executed.
2103  * @sc: pointer to scsi command we're working on
2104  * @sgl: pointer to buffer list for protection groups
2105  * @datacnt: number of segments of data that have been dma mapped
2106  * @protcnt: number of segment of protection data that have been dma mapped
2107  *
2108  * This function sets up SGL buffer list for protection groups of
2109  * type LPFC_PG_TYPE_DIF
2110  *
2111  * This is usually used when DIFs are in their own buffers,
2112  * separate from the data. The HBA can then by instructed
2113  * to place the DIFs in the outgoing stream.  For read operations,
2114  * The HBA could extract the DIFs and place it in DIF buffers.
2115  *
2116  * The buffer list for this type consists of one or more of the
2117  * protection groups described below:
2118  *                                    +-------------------------+
2119  *   start of first prot group  -->   |         DISEED          |
2120  *                                    +-------------------------+
2121  *                                    |      DIF (Prot SGE)     |
2122  *                                    +-------------------------+
2123  *                                    |        Data SGE         |
2124  *                                    +-------------------------+
2125  *                                    |more Data SGE's ... (opt)|
2126  *                                    +-------------------------+
2127  *   start of new  prot group  -->    |         DISEED          |
2128  *                                    +-------------------------+
2129  *                                    |          ...            |
2130  *                                    +-------------------------+
2131  *
2132  * Note: It is assumed that both data and protection s/g buffers have been
2133  *       mapped for DMA
2134  *
2135  * Returns the number of SGEs added to the SGL.
2136  **/
2137 static int
2138 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2139                 struct sli4_sge *sgl, int datacnt, int protcnt)
2140 {
2141         struct scatterlist *sgde = NULL; /* s/g data entry */
2142         struct scatterlist *sgpe = NULL; /* s/g prot entry */
2143         struct sli4_sge_diseed *diseed = NULL;
2144         dma_addr_t dataphysaddr, protphysaddr;
2145         unsigned short curr_data = 0, curr_prot = 0;
2146         unsigned int split_offset;
2147         unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2148         unsigned int protgrp_blks, protgrp_bytes;
2149         unsigned int remainder, subtotal;
2150         int status;
2151         unsigned char pgdone = 0, alldone = 0;
2152         unsigned blksize;
2153         uint32_t reftag;
2154         uint8_t txop, rxop;
2155         uint32_t dma_len;
2156 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2157         uint32_t rc;
2158 #endif
2159         uint32_t checking = 1;
2160         uint32_t dma_offset = 0;
2161         int num_sge = 0;
2162
2163         sgpe = scsi_prot_sglist(sc);
2164         sgde = scsi_sglist(sc);
2165
2166         if (!sgpe || !sgde) {
2167                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2168                                 "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
2169                                 sgpe, sgde);
2170                 return 0;
2171         }
2172
2173         status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2174         if (status)
2175                 goto out;
2176
2177         /* extract some info from the scsi command */
2178         blksize = lpfc_cmd_blksize(sc);
2179         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2180
2181 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2182         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2183         if (rc) {
2184                 if (rc & BG_ERR_SWAP)
2185                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2186                 if (rc & BG_ERR_CHECK)
2187                         checking = 0;
2188         }
2189 #endif
2190
2191         split_offset = 0;
2192         do {
2193                 /* Check to see if we ran out of space */
2194                 if (num_sge >= (phba->cfg_total_seg_cnt - 2))
2195                         return num_sge + 3;
2196
2197                 /* setup DISEED with what we have */
2198                 diseed = (struct sli4_sge_diseed *) sgl;
2199                 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2200                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2201
2202                 /* Endianness conversion if necessary */
2203                 diseed->ref_tag = cpu_to_le32(reftag);
2204                 diseed->ref_tag_tran = diseed->ref_tag;
2205
2206                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2207                         bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2208
2209                 } else {
2210                         bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2211                         /*
2212                          * When in this mode, the hardware will replace
2213                          * the guard tag from the host with a
2214                          * newly generated good CRC for the wire.
2215                          * Switch to raw mode here to avoid this
2216                          * behavior. What the host sends gets put on the wire.
2217                          */
2218                         if (txop == BG_OP_IN_CRC_OUT_CRC) {
2219                                 txop = BG_OP_RAW_MODE;
2220                                 rxop = BG_OP_RAW_MODE;
2221                         }
2222                 }
2223
2224
2225                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2226                         bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2227                 else
2228                         bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2229
2230                 /* setup DISEED with the rest of the info */
2231                 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2232                 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2233
2234                 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2235                 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2236
2237                 /* Endianness conversion if necessary for DISEED */
2238                 diseed->word2 = cpu_to_le32(diseed->word2);
2239                 diseed->word3 = cpu_to_le32(diseed->word3);
2240
2241                 /* advance sgl and increment bde count */
2242                 num_sge++;
2243                 sgl++;
2244
2245                 /* setup the first BDE that points to protection buffer */
2246                 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2247                 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2248
2249                 /* must be integer multiple of the DIF block length */
2250                 BUG_ON(protgroup_len % 8);
2251
2252                 /* Now setup DIF SGE */
2253                 sgl->word2 = 0;
2254                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2255                 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2256                 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2257                 sgl->word2 = cpu_to_le32(sgl->word2);
2258
2259                 protgrp_blks = protgroup_len / 8;
2260                 protgrp_bytes = protgrp_blks * blksize;
2261
2262                 /* check if DIF SGE is crossing the 4K boundary; if so split */
2263                 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2264                         protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2265                         protgroup_offset += protgroup_remainder;
2266                         protgrp_blks = protgroup_remainder / 8;
2267                         protgrp_bytes = protgrp_blks * blksize;
2268                 } else {
2269                         protgroup_offset = 0;
2270                         curr_prot++;
2271                 }
2272
2273                 num_sge++;
2274
2275                 /* setup SGE's for data blocks associated with DIF data */
2276                 pgdone = 0;
2277                 subtotal = 0; /* total bytes processed for current prot grp */
2278                 while (!pgdone) {
2279                         /* Check to see if we ran out of space */
2280                         if (num_sge >= phba->cfg_total_seg_cnt)
2281                                 return num_sge + 1;
2282
2283                         if (!sgde) {
2284                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2285                                         "9086 BLKGRD:%s Invalid data segment\n",
2286                                                 __func__);
2287                                 return 0;
2288                         }
2289                         sgl++;
2290                         dataphysaddr = sg_dma_address(sgde) + split_offset;
2291
2292                         remainder = sg_dma_len(sgde) - split_offset;
2293
2294                         if ((subtotal + remainder) <= protgrp_bytes) {
2295                                 /* we can use this whole buffer */
2296                                 dma_len = remainder;
2297                                 split_offset = 0;
2298
2299                                 if ((subtotal + remainder) == protgrp_bytes)
2300                                         pgdone = 1;
2301                         } else {
2302                                 /* must split this buffer with next prot grp */
2303                                 dma_len = protgrp_bytes - subtotal;
2304                                 split_offset += dma_len;
2305                         }
2306
2307                         subtotal += dma_len;
2308
2309                         sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2310                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2311                         bf_set(lpfc_sli4_sge_last, sgl, 0);
2312                         bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2313                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2314
2315                         sgl->sge_len = cpu_to_le32(dma_len);
2316                         dma_offset += dma_len;
2317
2318                         num_sge++;
2319                         curr_data++;
2320
2321                         if (split_offset)
2322                                 break;
2323
2324                         /* Move to the next s/g segment if possible */
2325                         sgde = sg_next(sgde);
2326                 }
2327
2328                 if (protgroup_offset) {
2329                         /* update the reference tag */
2330                         reftag += protgrp_blks;
2331                         sgl++;
2332                         continue;
2333                 }
2334
2335                 /* are we done ? */
2336                 if (curr_prot == protcnt) {
2337                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2338                         alldone = 1;
2339                 } else if (curr_prot < protcnt) {
2340                         /* advance to next prot buffer */
2341                         sgpe = sg_next(sgpe);
2342                         sgl++;
2343
2344                         /* update the reference tag */
2345                         reftag += protgrp_blks;
2346                 } else {
2347                         /* if we're here, we have a bug */
2348                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2349                                 "9085 BLKGRD: bug in %s\n", __func__);
2350                 }
2351
2352         } while (!alldone);
2353
2354 out:
2355
2356         return num_sge;
2357 }
2358
2359 /**
2360  * lpfc_prot_group_type - Get prtotection group type of SCSI command
2361  * @phba: The Hba for which this call is being executed.
2362  * @sc: pointer to scsi command we're working on
2363  *
2364  * Given a SCSI command that supports DIF, determine composition of protection
2365  * groups involved in setting up buffer lists
2366  *
2367  * Returns: Protection group type (with or without DIF)
2368  *
2369  **/
2370 static int
2371 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2372 {
2373         int ret = LPFC_PG_TYPE_INVALID;
2374         unsigned char op = scsi_get_prot_op(sc);
2375
2376         switch (op) {
2377         case SCSI_PROT_READ_STRIP:
2378         case SCSI_PROT_WRITE_INSERT:
2379                 ret = LPFC_PG_TYPE_NO_DIF;
2380                 break;
2381         case SCSI_PROT_READ_INSERT:
2382         case SCSI_PROT_WRITE_STRIP:
2383         case SCSI_PROT_READ_PASS:
2384         case SCSI_PROT_WRITE_PASS:
2385                 ret = LPFC_PG_TYPE_DIF_BUF;
2386                 break;
2387         default:
2388                 if (phba)
2389                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2390                                         "9021 Unsupported protection op:%d\n",
2391                                         op);
2392                 break;
2393         }
2394         return ret;
2395 }
2396
2397 /**
2398  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2399  * @phba: The Hba for which this call is being executed.
2400  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2401  *
2402  * Adjust the data length to account for how much data
2403  * is actually on the wire.
2404  *
2405  * returns the adjusted data length
2406  **/
2407 static int
2408 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2409                        struct lpfc_scsi_buf *lpfc_cmd)
2410 {
2411         struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2412         int fcpdl;
2413
2414         fcpdl = scsi_bufflen(sc);
2415
2416         /* Check if there is protection data on the wire */
2417         if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2418                 /* Read check for protection data */
2419                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2420                         return fcpdl;
2421
2422         } else {
2423                 /* Write check for protection data */
2424                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2425                         return fcpdl;
2426         }
2427
2428         /*
2429          * If we are in DIF Type 1 mode every data block has a 8 byte
2430          * DIF (trailer) attached to it. Must ajust FCP data length
2431          * to account for the protection data.
2432          */
2433         fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2434
2435         return fcpdl;
2436 }
2437
2438 /**
2439  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2440  * @phba: The Hba for which this call is being executed.
2441  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2442  *
2443  * This is the protection/DIF aware version of
2444  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2445  * two functions eventually, but for now, it's here
2446  **/
2447 static int
2448 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2449                 struct lpfc_scsi_buf *lpfc_cmd)
2450 {
2451         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2452         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2453         struct ulp_bde64 *bpl = lpfc_cmd->dma_sgl;
2454         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2455         uint32_t num_bde = 0;
2456         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2457         int prot_group_type = 0;
2458         int fcpdl;
2459         struct lpfc_vport *vport = phba->pport;
2460
2461         /*
2462          * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2463          *  fcp_rsp regions to the first data bde entry
2464          */
2465         bpl += 2;
2466         if (scsi_sg_count(scsi_cmnd)) {
2467                 /*
2468                  * The driver stores the segment count returned from pci_map_sg
2469                  * because this a count of dma-mappings used to map the use_sg
2470                  * pages.  They are not guaranteed to be the same for those
2471                  * architectures that implement an IOMMU.
2472                  */
2473                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2474                                         scsi_sglist(scsi_cmnd),
2475                                         scsi_sg_count(scsi_cmnd), datadir);
2476                 if (unlikely(!datasegcnt))
2477                         return 1;
2478
2479                 lpfc_cmd->seg_cnt = datasegcnt;
2480
2481                 /* First check if data segment count from SCSI Layer is good */
2482                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2483                         goto err;
2484
2485                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2486
2487                 switch (prot_group_type) {
2488                 case LPFC_PG_TYPE_NO_DIF:
2489
2490                         /* Here we need to add a PDE5 and PDE6 to the count */
2491                         if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2492                                 goto err;
2493
2494                         num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2495                                         datasegcnt);
2496                         /* we should have 2 or more entries in buffer list */
2497                         if (num_bde < 2)
2498                                 goto err;
2499                         break;
2500
2501                 case LPFC_PG_TYPE_DIF_BUF:
2502                         /*
2503                          * This type indicates that protection buffers are
2504                          * passed to the driver, so that needs to be prepared
2505                          * for DMA
2506                          */
2507                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
2508                                         scsi_prot_sglist(scsi_cmnd),
2509                                         scsi_prot_sg_count(scsi_cmnd), datadir);
2510                         if (unlikely(!protsegcnt)) {
2511                                 scsi_dma_unmap(scsi_cmnd);
2512                                 return 1;
2513                         }
2514
2515                         lpfc_cmd->prot_seg_cnt = protsegcnt;
2516
2517                         /*
2518                          * There is a minimun of 4 BPLs used for every
2519                          * protection data segment.
2520                          */
2521                         if ((lpfc_cmd->prot_seg_cnt * 4) >
2522                             (phba->cfg_total_seg_cnt - 2))
2523                                 goto err;
2524
2525                         num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2526                                         datasegcnt, protsegcnt);
2527                         /* we should have 3 or more entries in buffer list */
2528                         if ((num_bde < 3) ||
2529                             (num_bde > phba->cfg_total_seg_cnt))
2530                                 goto err;
2531                         break;
2532
2533                 case LPFC_PG_TYPE_INVALID:
2534                 default:
2535                         scsi_dma_unmap(scsi_cmnd);
2536                         lpfc_cmd->seg_cnt = 0;
2537
2538                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2539                                         "9022 Unexpected protection group %i\n",
2540                                         prot_group_type);
2541                         return 1;
2542                 }
2543         }
2544
2545         /*
2546          * Finish initializing those IOCB fields that are dependent on the
2547          * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2548          * reinitialized since all iocb memory resources are used many times
2549          * for transmit, receive, and continuation bpl's.
2550          */
2551         iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2552         iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2553         iocb_cmd->ulpBdeCount = 1;
2554         iocb_cmd->ulpLe = 1;
2555
2556         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2557         fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2558
2559         /*
2560          * Due to difference in data length between DIF/non-DIF paths,
2561          * we need to set word 4 of IOCB here
2562          */
2563         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2564
2565         /*
2566          * For First burst, we may need to adjust the initial transfer
2567          * length for DIF
2568          */
2569         if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2570             (fcpdl < vport->cfg_first_burst_size))
2571                 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2572
2573         return 0;
2574 err:
2575         if (lpfc_cmd->seg_cnt)
2576                 scsi_dma_unmap(scsi_cmnd);
2577         if (lpfc_cmd->prot_seg_cnt)
2578                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2579                              scsi_prot_sg_count(scsi_cmnd),
2580                              scsi_cmnd->sc_data_direction);
2581
2582         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2583                         "9023 Cannot setup S/G List for HBA"
2584                         "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2585                         lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2586                         phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2587                         prot_group_type, num_bde);
2588
2589         lpfc_cmd->seg_cnt = 0;
2590         lpfc_cmd->prot_seg_cnt = 0;
2591         return 1;
2592 }
2593
2594 /*
2595  * This function calcuates the T10 DIF guard tag
2596  * on the specified data using a CRC algorithmn
2597  * using crc_t10dif.
2598  */
2599 static uint16_t
2600 lpfc_bg_crc(uint8_t *data, int count)
2601 {
2602         uint16_t crc = 0;
2603         uint16_t x;
2604
2605         crc = crc_t10dif(data, count);
2606         x = cpu_to_be16(crc);
2607         return x;
2608 }
2609
2610 /*
2611  * This function calcuates the T10 DIF guard tag
2612  * on the specified data using a CSUM algorithmn
2613  * using ip_compute_csum.
2614  */
2615 static uint16_t
2616 lpfc_bg_csum(uint8_t *data, int count)
2617 {
2618         uint16_t ret;
2619
2620         ret = ip_compute_csum(data, count);
2621         return ret;
2622 }
2623
2624 /*
2625  * This function examines the protection data to try to determine
2626  * what type of T10-DIF error occurred.
2627  */
2628 static void
2629 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2630 {
2631         struct scatterlist *sgpe; /* s/g prot entry */
2632         struct scatterlist *sgde; /* s/g data entry */
2633         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2634         struct scsi_dif_tuple *src = NULL;
2635         uint8_t *data_src = NULL;
2636         uint16_t guard_tag;
2637         uint16_t start_app_tag, app_tag;
2638         uint32_t start_ref_tag, ref_tag;
2639         int prot, protsegcnt;
2640         int err_type, len, data_len;
2641         int chk_ref, chk_app, chk_guard;
2642         uint16_t sum;
2643         unsigned blksize;
2644
2645         err_type = BGS_GUARD_ERR_MASK;
2646         sum = 0;
2647         guard_tag = 0;
2648
2649         /* First check to see if there is protection data to examine */
2650         prot = scsi_get_prot_op(cmd);
2651         if ((prot == SCSI_PROT_READ_STRIP) ||
2652             (prot == SCSI_PROT_WRITE_INSERT) ||
2653             (prot == SCSI_PROT_NORMAL))
2654                 goto out;
2655
2656         /* Currently the driver just supports ref_tag and guard_tag checking */
2657         chk_ref = 1;
2658         chk_app = 0;
2659         chk_guard = 0;
2660
2661         /* Setup a ptr to the protection data provided by the SCSI host */
2662         sgpe = scsi_prot_sglist(cmd);
2663         protsegcnt = lpfc_cmd->prot_seg_cnt;
2664
2665         if (sgpe && protsegcnt) {
2666
2667                 /*
2668                  * We will only try to verify guard tag if the segment
2669                  * data length is a multiple of the blksize.
2670                  */
2671                 sgde = scsi_sglist(cmd);
2672                 blksize = lpfc_cmd_blksize(cmd);
2673                 data_src = (uint8_t *)sg_virt(sgde);
2674                 data_len = sgde->length;
2675                 if ((data_len & (blksize - 1)) == 0)
2676                         chk_guard = 1;
2677
2678                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2679                 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
2680                 start_app_tag = src->app_tag;
2681                 len = sgpe->length;
2682                 while (src && protsegcnt) {
2683                         while (len) {
2684
2685                                 /*
2686                                  * First check to see if a protection data
2687                                  * check is valid
2688                                  */
2689                                 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2690                                     (src->app_tag == T10_PI_APP_ESCAPE)) {
2691                                         start_ref_tag++;
2692                                         goto skipit;
2693                                 }
2694
2695                                 /* First Guard Tag checking */
2696                                 if (chk_guard) {
2697                                         guard_tag = src->guard_tag;
2698                                         if (lpfc_cmd_guard_csum(cmd))
2699                                                 sum = lpfc_bg_csum(data_src,
2700                                                                    blksize);
2701                                         else
2702                                                 sum = lpfc_bg_crc(data_src,
2703                                                                   blksize);
2704                                         if ((guard_tag != sum)) {
2705                                                 err_type = BGS_GUARD_ERR_MASK;
2706                                                 goto out;
2707                                         }
2708                                 }
2709
2710                                 /* Reference Tag checking */
2711                                 ref_tag = be32_to_cpu(src->ref_tag);
2712                                 if (chk_ref && (ref_tag != start_ref_tag)) {
2713                                         err_type = BGS_REFTAG_ERR_MASK;
2714                                         goto out;
2715                                 }
2716                                 start_ref_tag++;
2717
2718                                 /* App Tag checking */
2719                                 app_tag = src->app_tag;
2720                                 if (chk_app && (app_tag != start_app_tag)) {
2721                                         err_type = BGS_APPTAG_ERR_MASK;
2722                                         goto out;
2723                                 }
2724 skipit:
2725                                 len -= sizeof(struct scsi_dif_tuple);
2726                                 if (len < 0)
2727                                         len = 0;
2728                                 src++;
2729
2730                                 data_src += blksize;
2731                                 data_len -= blksize;
2732
2733                                 /*
2734                                  * Are we at the end of the Data segment?
2735                                  * The data segment is only used for Guard
2736                                  * tag checking.
2737                                  */
2738                                 if (chk_guard && (data_len == 0)) {
2739                                         chk_guard = 0;
2740                                         sgde = sg_next(sgde);
2741                                         if (!sgde)
2742                                                 goto out;
2743
2744                                         data_src = (uint8_t *)sg_virt(sgde);
2745                                         data_len = sgde->length;
2746                                         if ((data_len & (blksize - 1)) == 0)
2747                                                 chk_guard = 1;
2748                                 }
2749                         }
2750
2751                         /* Goto the next Protection data segment */
2752                         sgpe = sg_next(sgpe);
2753                         if (sgpe) {
2754                                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2755                                 len = sgpe->length;
2756                         } else {
2757                                 src = NULL;
2758                         }
2759                         protsegcnt--;
2760                 }
2761         }
2762 out:
2763         if (err_type == BGS_GUARD_ERR_MASK) {
2764                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2765                                         0x10, 0x1);
2766                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2767                               SAM_STAT_CHECK_CONDITION;
2768                 phba->bg_guard_err_cnt++;
2769                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2770                                 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
2771                                 (unsigned long)scsi_get_lba(cmd),
2772                                 sum, guard_tag);
2773
2774         } else if (err_type == BGS_REFTAG_ERR_MASK) {
2775                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2776                                         0x10, 0x3);
2777                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2778                               SAM_STAT_CHECK_CONDITION;
2779
2780                 phba->bg_reftag_err_cnt++;
2781                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2782                                 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
2783                                 (unsigned long)scsi_get_lba(cmd),
2784                                 ref_tag, start_ref_tag);
2785
2786         } else if (err_type == BGS_APPTAG_ERR_MASK) {
2787                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2788                                         0x10, 0x2);
2789                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2790                               SAM_STAT_CHECK_CONDITION;
2791
2792                 phba->bg_apptag_err_cnt++;
2793                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2794                                 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
2795                                 (unsigned long)scsi_get_lba(cmd),
2796                                 app_tag, start_app_tag);
2797         }
2798 }
2799
2800
/*
 * lpfc_parse_bg_err - Parse BlockGuard error status reported by the HBA
 * @phba: The HBA for which this call is being executed.
 * @lpfc_cmd: The scsi buffer whose BlockGuard status is parsed.
 * @pIocbOut: The response IOCB carrying the SLI-3 BG status words.
 *
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	/* bghm: high water mark (error offset); bgstat: BG status bits */
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	/*
	 * One-shot capture of the data (and DIF, if present) buffers to
	 * debugfs for the first BG error observed; the global
	 * _dump_buf_done flag, protected by _dump_buf_lock, prevents
	 * repeated dumps.
	 */
	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		/* Block count taken from CDB bytes 7-8 (READ/WRITE 10) */
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	/* A bad BG profile is a driver/FW setup problem, not data
	 * corruption: fail the command with DID_ERROR and return -1.
	 */
	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	/* Likewise for an uninitialized DIF block indication */
	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	/* Guard tag error: sense 10/1 (LOGICAL BLOCK GUARD CHECK FAILED) */
	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	/* Ref tag error: sense 10/3 (LOGICAL BLOCK REF TAG CHECK FAILED) */
	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	/* App tag error: sense 10/2 (LOGICAL BLOCK APP TAG CHECK FAILED) */
	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count; convert it
		 * to a sector count.  For INSERT/STRIP on one side only, the
		 * wire blocks are bare sectors; for the other ops each wire
		 * block also carries an 8-byte DIF tuple.
		 */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
2967
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.  The resulting
 * SGEs are written into @lpfc_cmd's SGL after the fcp_cmnd/fcp_rsp entries,
 * and IOCB word 4 (fcpi_parm) is set to the transfer length.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		/* The mapping may legitimately produce more segments than
		 * the HBA was configured for; undo it and fail the I/O.
		 */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the sge's: set address, offset and type for
		 * each, and mark only the final entry with the "last" flag.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/*
		 * Setup the first Payload BDE. For FCoE we just key off
		 * Performance Hints, for FC we use lpfc_enable_pbde.
		 * We populate words 13-15 of IOCB/WQE.
		 */
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		/* No data: mark the fcp_rsp entry as the last SGE */
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		/* Zero the Payload BDE words so stale data isn't sent */
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;
	}
	return 0;
}
3115
3116 /**
3117  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3118  * @phba: The Hba for which this call is being executed.
3119  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3120  *
3121  * This is the protection/DIF aware version of
3122  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3123  * two functions eventually, but for now, it's here
3124  **/
3125 static int
3126 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3127                 struct lpfc_scsi_buf *lpfc_cmd)
3128 {
3129         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3130         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3131         struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3132         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3133         uint32_t num_sge = 0;
3134         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3135         int prot_group_type = 0;
3136         int fcpdl;
3137         struct lpfc_vport *vport = phba->pport;
3138
3139         /*
3140          * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3141          *  fcp_rsp regions to the first data sge entry
3142          */
3143         if (scsi_sg_count(scsi_cmnd)) {
3144                 /*
3145                  * The driver stores the segment count returned from pci_map_sg
3146                  * because this a count of dma-mappings used to map the use_sg
3147                  * pages.  They are not guaranteed to be the same for those
3148                  * architectures that implement an IOMMU.
3149                  */
3150                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3151                                         scsi_sglist(scsi_cmnd),
3152                                         scsi_sg_count(scsi_cmnd), datadir);
3153                 if (unlikely(!datasegcnt))
3154                         return 1;
3155
3156                 sgl += 1;
3157                 /* clear the last flag in the fcp_rsp map entry */
3158                 sgl->word2 = le32_to_cpu(sgl->word2);
3159                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3160                 sgl->word2 = cpu_to_le32(sgl->word2);
3161
3162                 sgl += 1;
3163                 lpfc_cmd->seg_cnt = datasegcnt;
3164
3165                 /* First check if data segment count from SCSI Layer is good */
3166                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3167                         goto err;
3168
3169                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3170
3171                 switch (prot_group_type) {
3172                 case LPFC_PG_TYPE_NO_DIF:
3173                         /* Here we need to add a DISEED to the count */
3174                         if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3175                                 goto err;
3176
3177                         num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3178                                         datasegcnt);
3179
3180                         /* we should have 2 or more entries in buffer list */
3181                         if (num_sge < 2)
3182                                 goto err;
3183                         break;
3184
3185                 case LPFC_PG_TYPE_DIF_BUF:
3186                         /*
3187                          * This type indicates that protection buffers are
3188                          * passed to the driver, so that needs to be prepared
3189                          * for DMA
3190                          */
3191                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
3192                                         scsi_prot_sglist(scsi_cmnd),
3193                                         scsi_prot_sg_count(scsi_cmnd), datadir);
3194                         if (unlikely(!protsegcnt)) {
3195                                 scsi_dma_unmap(scsi_cmnd);
3196                                 return 1;
3197                         }
3198
3199                         lpfc_cmd->prot_seg_cnt = protsegcnt;
3200                         /*
3201                          * There is a minimun of 3 SGEs used for every
3202                          * protection data segment.
3203                          */
3204                         if ((lpfc_cmd->prot_seg_cnt * 3) >
3205                             (phba->cfg_total_seg_cnt - 2))
3206                                 goto err;
3207
3208                         num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3209                                         datasegcnt, protsegcnt);
3210
3211                         /* we should have 3 or more entries in buffer list */
3212                         if ((num_sge < 3) ||
3213                             (num_sge > phba->cfg_total_seg_cnt))
3214                                 goto err;
3215                         break;
3216
3217                 case LPFC_PG_TYPE_INVALID:
3218                 default:
3219                         scsi_dma_unmap(scsi_cmnd);
3220                         lpfc_cmd->seg_cnt = 0;
3221
3222                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3223                                         "9083 Unexpected protection group %i\n",
3224                                         prot_group_type);
3225                         return 1;
3226                 }
3227         }
3228
3229         switch (scsi_get_prot_op(scsi_cmnd)) {
3230         case SCSI_PROT_WRITE_STRIP:
3231         case SCSI_PROT_READ_STRIP:
3232                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3233                 break;
3234         case SCSI_PROT_WRITE_INSERT:
3235         case SCSI_PROT_READ_INSERT:
3236                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3237                 break;
3238         case SCSI_PROT_WRITE_PASS:
3239         case SCSI_PROT_READ_PASS:
3240                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3241                 break;
3242         }
3243
3244         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3245         fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3246
3247         /*
3248          * Due to difference in data length between DIF/non-DIF paths,
3249          * we need to set word 4 of IOCB here
3250          */
3251         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3252
3253         /*
3254          * For First burst, we may need to adjust the initial transfer
3255          * length for DIF
3256          */
3257         if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3258             (fcpdl < vport->cfg_first_burst_size))
3259                 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3260
3261         /*
3262          * If the OAS driver feature is enabled and the lun is enabled for
3263          * OAS, set the oas iocb related flags.
3264          */
3265         if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3266                 scsi_cmnd->device->hostdata)->oas_enabled)
3267                 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3268
3269         return 0;
3270 err:
3271         if (lpfc_cmd->seg_cnt)
3272                 scsi_dma_unmap(scsi_cmnd);
3273         if (lpfc_cmd->prot_seg_cnt)
3274                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3275                              scsi_prot_sg_count(scsi_cmnd),
3276                              scsi_cmnd->sc_data_direction);
3277
3278         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3279                         "9084 Cannot setup S/G List for HBA"
3280                         "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3281                         lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3282                         phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3283                         prot_group_type, num_sge);
3284
3285         lpfc_cmd->seg_cnt = 0;
3286         lpfc_cmd->prot_seg_cnt = 0;
3287         return 1;
3288 }
3289
3290 /**
3291  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3292  * @phba: The Hba for which this call is being executed.
3293  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3294  *
3295  * This routine wraps the actual DMA mapping function pointer from the
3296  * lpfc_hba struct.
3297  *
3298  * Return codes:
3299  *      1 - Error
3300  *      0 - Success
3301  **/
3302 static inline int
3303 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3304 {
3305         return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3306 }
3307
3308 /**
3309  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3310  * using BlockGuard.
3311  * @phba: The Hba for which this call is being executed.
3312  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3313  *
3314  * This routine wraps the actual DMA mapping function pointer from the
3315  * lpfc_hba struct.
3316  *
3317  * Return codes:
3318  *      1 - Error
3319  *      0 - Success
3320  **/
3321 static inline int
3322 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3323 {
3324         return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3325 }
3326
3327 /**
3328  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3329  * @phba: Pointer to hba context object.
3330  * @vport: Pointer to vport object.
3331  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3332  * @rsp_iocb: Pointer to response iocb object which reported error.
3333  *
3334  * This function posts an event when there is a SCSI command reporting
3335  * error from the scsi device.
3336  **/
3337 static void
3338 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3339                 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3340         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3341         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3342         uint32_t resp_info = fcprsp->rspStatus2;
3343         uint32_t scsi_status = fcprsp->rspStatus3;
3344         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3345         struct lpfc_fast_path_event *fast_path_evt = NULL;
3346         struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3347         unsigned long flags;
3348
3349         if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3350                 return;
3351
3352         /* If there is queuefull or busy condition send a scsi event */
3353         if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3354                 (cmnd->result == SAM_STAT_BUSY)) {
3355                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3356                 if (!fast_path_evt)
3357                         return;
3358                 fast_path_evt->un.scsi_evt.event_type =
3359                         FC_REG_SCSI_EVENT;
3360                 fast_path_evt->un.scsi_evt.subcategory =
3361                 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3362                 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3363                 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3364                 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3365                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3366                 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3367                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3368         } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3369                 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3370                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3371                 if (!fast_path_evt)
3372                         return;
3373                 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3374                         FC_REG_SCSI_EVENT;
3375                 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3376                         LPFC_EVENT_CHECK_COND;
3377                 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3378                         cmnd->device->lun;
3379                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3380                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3381                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3382                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3383                 fast_path_evt->un.check_cond_evt.sense_key =
3384                         cmnd->sense_buffer[2] & 0xf;
3385                 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3386                 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3387         } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3388                      fcpi_parm &&
3389                      ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3390                         ((scsi_status == SAM_STAT_GOOD) &&
3391                         !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3392                 /*
3393                  * If status is good or resid does not match with fcp_param and
3394                  * there is valid fcpi_parm, then there is a read_check error
3395                  */
3396                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3397                 if (!fast_path_evt)
3398                         return;
3399                 fast_path_evt->un.read_check_error.header.event_type =
3400                         FC_REG_FABRIC_EVENT;
3401                 fast_path_evt->un.read_check_error.header.subcategory =
3402                         LPFC_EVENT_FCPRDCHKERR;
3403                 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3404                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3405                 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3406                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3407                 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3408                 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3409                 fast_path_evt->un.read_check_error.fcpiparam =
3410                         fcpi_parm;
3411         } else
3412                 return;
3413
3414         fast_path_evt->vport = vport;
3415         spin_lock_irqsave(&phba->hbalock, flags);
3416         list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3417         spin_unlock_irqrestore(&phba->hbalock, flags);
3418         lpfc_worker_wake_up(phba);
3419         return;
3420 }
3421
3422 /**
3423  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3424  * @phba: The HBA for which this call is being executed.
3425  * @psb: The scsi buffer which is going to be un-mapped.
3426  *
3427  * This routine does DMA un-mapping of scatter gather list of scsi command
3428  * field of @lpfc_cmd for device with SLI-3 interface spec.
3429  **/
3430 static void
3431 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3432 {
3433         /*
3434          * There are only two special cases to consider.  (1) the scsi command
3435          * requested scatter-gather usage or (2) the scsi command allocated
3436          * a request buffer, but did not request use_sg.  There is a third
3437          * case, but it does not require resource deallocation.
3438          */
3439         if (psb->seg_cnt > 0)
3440                 scsi_dma_unmap(psb->pCmd);
3441         if (psb->prot_seg_cnt > 0)
3442                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3443                                 scsi_prot_sg_count(psb->pCmd),
3444                                 psb->pCmd->sc_data_direction);
3445 }
3446
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                    struct lpfc_iocbq *rsp_iocb)
{
        struct lpfc_hba *phba = vport->phba;
        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
        uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
        uint32_t *lp;
        uint32_t host_status = DID_OK;
        uint32_t rsplen = 0;
        uint32_t fcpDl;
        uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


        /*
         *  If this is a task management command, there is no
         *  scsi packet associated with this lpfc_cmd.  The driver
         *  consumes it.
         */
        if (fcpcmd->fcpCntl2) {
                scsi_status = 0;
                goto out;
        }

        /*
         * Sanity-check the response-info portion of the FCP_RSP before
         * trusting any of it; valid lengths are 0, 4 or 8 bytes.
         */
        if (resp_info & RSP_LEN_VALID) {
                rsplen = be32_to_cpu(fcprsp->rspRspLen);
                if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "2719 Invalid response length: "
                                 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
                                 cmnd->device->id,
                                 cmnd->device->lun, cmnd->cmnd[0],
                                 rsplen);
                        host_status = DID_ERROR;
                        goto out;
                }
                if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "2757 Protocol failure detected during "
                                 "processing of FCP I/O op: "
                                 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
                                 cmnd->device->id,
                                 cmnd->device->lun, cmnd->cmnd[0],
                                 fcprsp->rspInfo3);
                        host_status = DID_ERROR;
                        goto out;
                }
        }

        /*
         * Copy any sense data to the midlayer buffer (capped at
         * SCSI_SENSE_BUFFERSIZE).  The sense bytes follow the variable
         * length response-info area, so skip rsplen bytes past rspInfo0.
         */
        if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
                uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
                if (snslen > SCSI_SENSE_BUFFERSIZE)
                        snslen = SCSI_SENSE_BUFFERSIZE;

                if (resp_info & RSP_LEN_VALID)
                  rsplen = be32_to_cpu(fcprsp->rspRspLen);
                memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
        }
        lp = (uint32_t *)cmnd->sense_buffer;

        /* special handling for under run conditions */
        if (!scsi_status && (resp_info & RESID_UNDER)) {
                /* don't log under runs if fcp set... */
                if (vport->cfg_log_verbose & LOG_FCP)
                        logit = LOG_FCP_ERROR;
                /* unless operator says so */
                if (vport->cfg_log_verbose & LOG_FCP_UNDER)
                        logit = LOG_FCP_UNDER;
        }

        lpfc_printf_vlog(vport, KERN_WARNING, logit,
                         "9024 FCP command x%x failed: x%x SNS x%x x%x "
                         "Data: x%x x%x x%x x%x x%x\n",
                         cmnd->cmnd[0], scsi_status,
                         be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
                         be32_to_cpu(fcprsp->rspResId),
                         be32_to_cpu(fcprsp->rspSnsLen),
                         be32_to_cpu(fcprsp->rspRspLen),
                         fcprsp->rspInfo3);

        /* Default: no residual; adjusted below for under/over run. */
        scsi_set_resid(cmnd, 0);
        fcpDl = be32_to_cpu(fcpcmd->fcpDl);
        if (resp_info & RESID_UNDER) {
                scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
                                 "9025 FCP Underrun, expected %d, "
                                 "residual %d Data: x%x x%x x%x\n",
                                 fcpDl,
                                 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
                                 cmnd->underflow);

                /*
                 * If there is an under run, check if under run reported by
                 * storage array is same as the under run reported by HBA.
                 * If this is not same, there is a dropped frame.
                 */
                if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
                        lpfc_printf_vlog(vport, KERN_WARNING,
                                         LOG_FCP | LOG_FCP_ERROR,
                                         "9026 FCP Read Check Error "
                                         "and Underrun Data: x%x x%x x%x x%x\n",
                                         fcpDl,
                                         scsi_get_resid(cmnd), fcpi_parm,
                                         cmnd->cmnd[0]);
                        scsi_set_resid(cmnd, scsi_bufflen(cmnd));
                        host_status = DID_ERROR;
                }
                /*
                 * The cmnd->underflow is the minimum number of bytes that must
                 * be transferred for this command.  Provided a sense condition
                 * is not present, make sure the actual amount transferred is at
                 * least the underflow value or fail.
                 */
                if (!(resp_info & SNS_LEN_VALID) &&
                    (scsi_status == SAM_STAT_GOOD) &&
                    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
                     < cmnd->underflow)) {
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                                         "9027 FCP command x%x residual "
                                         "underrun converted to error "
                                         "Data: x%x x%x x%x\n",
                                         cmnd->cmnd[0], scsi_bufflen(cmnd),
                                         scsi_get_resid(cmnd), cmnd->underflow);
                        host_status = DID_ERROR;
                }
        } else if (resp_info & RESID_OVER) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "9028 FCP command x%x residual overrun error. "
                                 "Data: x%x x%x\n", cmnd->cmnd[0],
                                 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
                host_status = DID_ERROR;

        /*
         * Check SLI validation that all the transfer was actually done
         * (fcpi_parm should be zero). Apply check only to reads.
         */
        } else if (fcpi_parm) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
                                 "9029 FCP %s Check Error xri x%x  Data: "
                                 "x%x x%x x%x x%x x%x\n",
                                 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
                                 "Read" : "Write"),
                                 ((phba->sli_rev == LPFC_SLI_REV4) ?
                                 lpfc_cmd->cur_iocbq.sli4_xritag :
                                 rsp_iocb->iocb.ulpContext),
                                 fcpDl, be32_to_cpu(fcprsp->rspResId),
                                 fcpi_parm, cmnd->cmnd[0], scsi_status);

                /* There is some issue with the LPe12000 that causes it
                 * to miscalculate the fcpi_parm and falsely trip this
                 * recovery logic.  Detect this case and don't error when true.
                 */
                if (fcpi_parm > fcpDl)
                        goto out;

                switch (scsi_status) {
                case SAM_STAT_GOOD:
                case SAM_STAT_CHECK_CONDITION:
                        /* Fabric dropped a data frame. Fail any successful
                         * command in which we detected dropped frames.
                         * A status of good or some check conditions could
                         * be considered a successful command.
                         */
                        host_status = DID_ERROR;
                        break;
                }
                scsi_set_resid(cmnd, scsi_bufflen(cmnd));
        }

 out:
        cmnd->result = host_status << 16 | scsi_status;
        lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
3635
3636 /**
3637  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
3638  * @phba: Pointer to HBA context object.
3639  *
3640  * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
3641  * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
3642  * held.
3643  * If scsi-mq is enabled, get the default block layer mapping of software queues
3644  * to hardware queues. This information is saved in request tag.
3645  *
3646  * Return: index into SLI4 fast-path FCP queue index.
3647  **/
3648 int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
3649                                   struct lpfc_scsi_buf *lpfc_cmd)
3650 {
3651         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3652         struct lpfc_vector_map_info *cpup;
3653         int chann, cpu;
3654         uint32_t tag;
3655         uint16_t hwq;
3656
3657         if (cmnd) {
3658                 tag = blk_mq_unique_tag(cmnd->request);
3659                 hwq = blk_mq_unique_tag_to_hwq(tag);
3660
3661                 return hwq;
3662         }
3663
3664         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
3665             && phba->cfg_fcp_io_channel > 1) {
3666                 cpu = lpfc_cmd->cpu;
3667                 if (cpu < phba->sli4_hba.num_present_cpu) {
3668                         cpup = phba->sli4_hba.cpu_map;
3669                         cpup += cpu;
3670                         return cpup->channel_id;
3671                 }
3672         }
3673         chann = atomic_add_return(1, &phba->fcp_qidx);
3674         chann = chann % phba->cfg_fcp_io_channel;
3675         return chann;
3676 }
3677
3678
3679 /**
3680  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3681  * @phba: The Hba for which this call is being executed.
3682  * @pIocbIn: The command IOCBQ for the scsi cmnd.
3683  * @pIocbOut: The response IOCBQ for the scsi cmnd.
3684  *
3685  * This routine assigns scsi command result by looking into response IOCB
3686  * status field appropriately. This routine handles QUEUE FULL condition as
3687  * well by ramping down device queue depth.
3688  **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        struct lpfc_iocbq *pIocbOut)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;
        struct lpfc_vport      *vport = pIocbIn->vport;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd;
        unsigned long flags;
        struct lpfc_fast_path_event *fast_path_evt;
        struct Scsi_Host *shost;
        uint32_t logit = LOG_FCP;

        atomic_inc(&phba->fc4ScsiIoCmpls);

        /* Sanity check on return of outstanding command */
        cmd = lpfc_cmd->pCmd;
        if (!cmd)
                return;
        shost = cmd->device->host;

        lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
        /* pick up SLI4 exchange busy status from HBA */
        lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (lpfc_cmd->prot_data_type) {
                struct scsi_dif_tuple *src = NULL;

                src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
                /*
                 * Used to restore any changes to protection
                 * data for error injection.
                 */
                switch (lpfc_cmd->prot_data_type) {
                case LPFC_INJERR_REFTAG:
                        src->ref_tag =
                                lpfc_cmd->prot_data;
                        break;
                case LPFC_INJERR_APPTAG:
                        src->app_tag =
                                (uint16_t)lpfc_cmd->prot_data;
                        break;
                case LPFC_INJERR_GUARD:
                        src->guard_tag =
                                (uint16_t)lpfc_cmd->prot_data;
                        break;
                default:
                        break;
                }

                /* Error-injection bookkeeping is one-shot; clear it. */
                lpfc_cmd->prot_data = 0;
                lpfc_cmd->prot_data_type = 0;
                lpfc_cmd->prot_data_segment = NULL;
        }
#endif

        if (lpfc_cmd->status) {
                /* Normalize driver-internal rejects and clamp unknown codes. */
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                else if (lpfc_cmd->status >= IOSTAT_CNT)
                        lpfc_cmd->status = IOSTAT_DEFAULT;
                /* Quiet plain underruns unless LOG_FCP_UNDER verbosity set. */
                if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
                    !lpfc_cmd->fcp_rsp->rspStatus3 &&
                    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
                    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
                        logit = 0;
                else
                        logit = LOG_FCP | LOG_FCP_UNDER;
                lpfc_printf_vlog(vport, KERN_WARNING, logit,
                         "9030 FCP cmd x%x failed <%d/%lld> "
                         "status: x%x result: x%x "
                         "sid: x%x did: x%x oxid: x%x "
                         "Data: x%x x%x\n",
                         cmd->cmnd[0],
                         cmd->device ? cmd->device->id : 0xffff,
                         cmd->device ? cmd->device->lun : 0xffff,
                         lpfc_cmd->status, lpfc_cmd->result,
                         vport->fc_myDID,
                         (pnode) ? pnode->nlp_DID : 0,
                         phba->sli_rev == LPFC_SLI_REV4 ?
                             lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
                         pIocbOut->iocb.ulpContext,
                         lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

                switch (lpfc_cmd->status) {
                case IOSTAT_FCP_RSP_ERROR:
                        /* Call FCP RSP handler to determine result */
                        lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
                        break;
                case IOSTAT_NPORT_BSY:
                case IOSTAT_FABRIC_BSY:
                        cmd->result = DID_TRANSPORT_DISRUPTED << 16;
                        /* Queue a fabric-busy event for the worker thread;
                         * allocation failure just skips the event.
                         */
                        fast_path_evt = lpfc_alloc_fast_evt(phba);
                        if (!fast_path_evt)
                                break;
                        fast_path_evt->un.fabric_evt.event_type =
                                FC_REG_FABRIC_EVENT;
                        fast_path_evt->un.fabric_evt.subcategory =
                                (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
                                LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
                        if (pnode && NLP_CHK_NODE_ACT(pnode)) {
                                memcpy(&fast_path_evt->un.fabric_evt.wwpn,
                                        &pnode->nlp_portname,
                                        sizeof(struct lpfc_name));
                                memcpy(&fast_path_evt->un.fabric_evt.wwnn,
                                        &pnode->nlp_nodename,
                                        sizeof(struct lpfc_name));
                        }
                        fast_path_evt->vport = vport;
                        fast_path_evt->work_evt.evt =
                                LPFC_EVT_FASTPATH_MGMT_EVT;
                        spin_lock_irqsave(&phba->hbalock, flags);
                        list_add_tail(&fast_path_evt->work_evt.evt_listp,
                                &phba->work_list);
                        spin_unlock_irqrestore(&phba->hbalock, flags);
                        lpfc_worker_wake_up(phba);
                        break;
                case IOSTAT_LOCAL_REJECT:
                case IOSTAT_REMOTE_STOP:
                        /* Security/crypto unwrap failures: drop the link. */
                        if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
                            lpfc_cmd->result ==
                                        IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
                            lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
                            lpfc_cmd->result ==
                                        IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
                                cmd->result = DID_NO_CONNECT << 16;
                                break;
                        }
                        /* Transient resource/abort conditions: retry the IO. */
                        if (lpfc_cmd->result == IOERR_INVALID_RPI ||
                            lpfc_cmd->result == IOERR_NO_RESOURCES ||
                            lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
                            lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
                                cmd->result = DID_REQUEUE << 16;
                                break;
                        }
                        if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
                             lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
                             pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
                                if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
                                        /*
                                         * This is a response for a BG enabled
                                         * cmd. Parse BG error
                                         */
                                        lpfc_parse_bg_err(phba, lpfc_cmd,
                                                        pIocbOut);
                                        break;
                                } else {
                                        lpfc_printf_vlog(vport, KERN_WARNING,
                                                        LOG_BG,
                                                        "9031 non-zero BGSTAT "
                                                        "on unprotected cmd\n");
                                }
                        }
                        if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
                                && (phba->sli_rev == LPFC_SLI_REV4)
                                && (pnode && NLP_CHK_NODE_ACT(pnode))) {
                                /* This IO was aborted by the target, we don't
                                 * know the rxid and because we did not send the
                                 * ABTS we cannot generate an RRQ.
                                 */
                                lpfc_set_rrq_active(phba, pnode,
                                        lpfc_cmd->cur_iocbq.sli4_lxritag,
                                        0, 0);
                        }
                /* else: fall through */
                default:
                        cmd->result = DID_ERROR << 16;
                        break;
                }

                /* Node gone or unmapped: force a retryable transport error. */
                if (!pnode || !NLP_CHK_NODE_ACT(pnode)
                    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
                        cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
                                      SAM_STAT_BUSY;
        } else
                cmd->result = DID_OK << 16;

        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
                uint32_t *lp = (uint32_t *)cmd->sense_buffer;

                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                                 "0710 Iodone <%d/%llu> cmd %p, error "
                                 "x%x SNS x%x x%x Data: x%x x%x\n",
                                 cmd->device->id, cmd->device->lun, cmd,
                                 cmd->result, *lp, *(lp + 3), cmd->retries,
                                 scsi_get_resid(cmd));
        }

        lpfc_update_stats(phba, lpfc_cmd);
        /* If this completion exceeded the configured max completion time,
         * shrink the target queue depth to the current pending count
         * (READ_10/WRITE_10 only, never below LPFC_MIN_TGT_QDEPTH).
         */
        if (vport->cfg_max_scsicmpl_time &&
           time_after(jiffies, lpfc_cmd->start_time +
                msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (pnode && NLP_CHK_NODE_ACT(pnode)) {
                        if (pnode->cmd_qdepth >
                                atomic_read(&pnode->cmd_pending) &&
                                (atomic_read(&pnode->cmd_pending) >
                                LPFC_MIN_TGT_QDEPTH) &&
                                ((cmd->cmnd[0] == READ_10) ||
                                (cmd->cmnd[0] == WRITE_10)))
                                pnode->cmd_qdepth =
                                        atomic_read(&pnode->cmd_pending);

                        pnode->last_change_time = jiffies;
                }
                spin_unlock_irqrestore(shost->host_lock, flags);
        }
        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

        /* If pCmd was set to NULL from abort path, do not call scsi_done.
         * The atomic xchg closes the race with the abort handler.
         */
        if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                                 "5688 FCP cmd already NULL, sid: 0x%06x, "
                                 "did: 0x%06x, oxid: 0x%04x\n",
                                 vport->fc_myDID,
                                 (pnode) ? pnode->nlp_DID : 0,
                                 phba->sli_rev == LPFC_SLI_REV4 ?
                                 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
                return;
        }

        /* The sdev is not guaranteed to be valid post scsi_done upcall. */
        cmd->scsi_done(cmd);

        /*
         * If there is a thread waiting for command completion
         * wake up the thread.
         */
        spin_lock_irqsave(shost->host_lock, flags);
        if (lpfc_cmd->waitq)
                wake_up(lpfc_cmd->waitq);
        spin_unlock_irqrestore(shost->host_lock, flags);

        lpfc_release_scsi_buf(phba, lpfc_cmd);
}
3929
3930 /**
3931  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
3932  * @data: A pointer to the immediate command data portion of the IOCB.
3933  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
3934  *
3935  * The routine copies the entire FCP command from @fcp_cmnd to @data while
3936  * byte swapping the data to big endian format for transmission on the wire.
3937  **/
3938 static void
3939 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
3940 {
3941         int i, j;
3942         for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
3943              i += sizeof(uint32_t), j++) {
3944                 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
3945         }
3946 }
3947
3948 /**
3949  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
3950  * @vport: The virtual port for which this call is being executed.
3951  * @lpfc_cmd: The scsi command which needs to send.
3952  * @pnode: Pointer to lpfc_nodelist.
3953  *
3954  * This routine initializes fcp_cmnd and iocb data structure from scsi command
3955  * to transfer for device with SLI3 interface spec.
3956  **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                    struct lpfc_nodelist *pnode)
{
        struct lpfc_hba *phba = vport->phba;
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
        int datadir = scsi_cmnd->sc_data_direction;
        uint8_t *ptr;
        bool sli4;
        uint32_t fcpdl;

        /* Nothing to prepare if the remote node is gone or inactive. */
        if (!pnode || !NLP_CHK_NODE_ACT(pnode))
                return;

        lpfc_cmd->fcp_rsp->rspSnsLen = 0;
        /* clear task management bits */
        lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

        int_to_scsilun(lpfc_cmd->pCmd->device->lun,
                        &lpfc_cmd->fcp_cmnd->fcp_lun);

        /* Copy the CDB and zero-pad the rest of the FCP CDB field. */
        ptr = &fcp_cmnd->fcpCdb[0];
        memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
        if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
                ptr += scsi_cmnd->cmd_len;
                memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
        }

        fcp_cmnd->fcpCntl1 = SIMPLE_Q;

        sli4 = (phba->sli_rev == LPFC_SLI_REV4);
        piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        if (scsi_sg_count(scsi_cmnd)) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        /* First burst: allow the target to accept up to the
                         * configured burst size without a transfer-ready.
                         */
                        if (vport->cfg_first_burst_size &&
                            (pnode->nlp_flag & NLP_FIRSTBURST)) {
                                fcpdl = scsi_bufflen(scsi_cmnd);
                                if (fcpdl < vport->cfg_first_burst_size)
                                        piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
                                else
                                        piocbq->iocb.un.fcpi.fcpi_XRdy =
                                                vport->cfg_first_burst_size;
                        }
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        atomic_inc(&phba->fc4ScsiOutputRequests);
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        atomic_inc(&phba->fc4ScsiInputRequests);
                }
        } else {
                /* No data transfer: plain FCP command IOCB. */
                iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
                iocb_cmd->un.fcpi.fcpi_parm = 0;
                iocb_cmd->ulpPU = 0;
                fcp_cmnd->fcpCntl3 = 0;
                atomic_inc(&phba->fc4ScsiControlRequests);
        }
        /* SLI-3 without BlockGuard embeds the FCP_CMND in the IOCB itself. */
        if (phba->sli_rev == 3 &&
            !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
                lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
         */
        piocbq->iocb.ulpContext = pnode->nlp_rpi;
        if (sli4)
                piocbq->iocb.ulpContext =
                  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
                piocbq->iocb.ulpFCP2Rcvy = 1;
        else
                piocbq->iocb.ulpFCP2Rcvy = 0;

        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
        piocbq->context1  = lpfc_cmd;
        piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
        piocbq->vport = vport;
}
4049
4050 /**
4051  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4052  * @vport: The virtual port for which this call is being executed.
4053  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4054  * @lun: Logical unit number.
4055  * @task_mgmt_cmd: SCSI task management command.
4056  *
4057  * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4058  * for device with SLI-3 interface spec.
4059  *
4060  * Return codes:
4061  *   0 - Error
4062  *   1 - Success
4063  **/
4064 static int
4065 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4066                              struct lpfc_scsi_buf *lpfc_cmd,
4067                              uint64_t lun,
4068                              uint8_t task_mgmt_cmd)
4069 {
4070         struct lpfc_iocbq *piocbq;
4071         IOCB_t *piocb;
4072         struct fcp_cmnd *fcp_cmnd;
4073         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4074         struct lpfc_nodelist *ndlp = rdata->pnode;
4075
4076         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4077             ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4078                 return 0;
4079
4080         piocbq = &(lpfc_cmd->cur_iocbq);
4081         piocbq->vport = vport;
4082
4083         piocb = &piocbq->iocb;
4084
4085         fcp_cmnd = lpfc_cmd->fcp_cmnd;
4086         /* Clear out any old data in the FCP command area */
4087         memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4088         int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4089         fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4090         if (vport->phba->sli_rev == 3 &&
4091             !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4092                 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4093         piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4094         piocb->ulpContext = ndlp->nlp_rpi;
4095         if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4096                 piocb->ulpContext =
4097                   vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4098         }
4099         piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4100         piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4101         piocb->ulpPU = 0;
4102         piocb->un.fcpi.fcpi_parm = 0;
4103
4104         /* ulpTimeout is only one byte */
4105         if (lpfc_cmd->timeout > 0xff) {
4106                 /*
4107                  * Do not timeout the command at the firmware level.
4108                  * The driver will provide the timeout mechanism.
4109                  */
4110                 piocb->ulpTimeout = 0;
4111         } else
4112                 piocb->ulpTimeout = lpfc_cmd->timeout;
4113
4114         if (vport->phba->sli_rev == LPFC_SLI_REV4)
4115                 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4116
4117         return 1;
4118 }
4119
4120 /**
4121  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4122  * @phba: The hba struct for which this call is being executed.
4123  * @dev_grp: The HBA PCI-Device group number.
4124  *
4125  * This routine sets up the SCSI interface API function jump table in @phba
4126  * struct.
4127  * Returns: 0 - success, -ENODEV - failure.
4128  **/
4129 int
4130 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4131 {
4132
4133         phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4134         phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4135
4136         switch (dev_grp) {
4137         case LPFC_PCI_DEV_LP:
4138                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4139                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4140                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4141                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4142                 break;
4143         case LPFC_PCI_DEV_OC:
4144                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4145                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4146                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4147                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4148                 break;
4149         default:
4150                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4151                                 "1418 Invalid HBA PCI-device group: 0x%x\n",
4152                                 dev_grp);
4153                 return -ENODEV;
4154                 break;
4155         }
4156         phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4157         phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4158         return 0;
4159 }
4160
/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset. It releases the scsi buffer associated with lpfc_cmd.
 **/
4170 static void
4171 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4172                         struct lpfc_iocbq *cmdiocbq,
4173                         struct lpfc_iocbq *rspiocbq)
4174 {
4175         struct lpfc_scsi_buf *lpfc_cmd =
4176                 (struct lpfc_scsi_buf *) cmdiocbq->context1;
4177         if (lpfc_cmd)
4178                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4179         return;
4180 }
4181
4182 /**
4183  * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
4184  *                             if issuing a pci_bus_reset is possibly unsafe
4185  * @phba: lpfc_hba pointer.
4186  *
4187  * Description:
4188  * Walks the bus_list to ensure only PCI devices with Emulex
4189  * vendor id, device ids that support hot reset, and only one occurrence
4190  * of function 0.
4191  *
4192  * Returns:
4193  * -EBADSLT,  detected invalid device
4194  *      0,    successful
4195  */
4196 int
4197 lpfc_check_pci_resettable(const struct lpfc_hba *phba)
4198 {
4199         const struct pci_dev *pdev = phba->pcidev;
4200         struct pci_dev *ptr = NULL;
4201         u8 counter = 0;
4202
4203         /* Walk the list of devices on the pci_dev's bus */
4204         list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4205                 /* Check for Emulex Vendor ID */
4206                 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4207                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4208                                         "8346 Non-Emulex vendor found: "
4209                                         "0x%04x\n", ptr->vendor);
4210                         return -EBADSLT;
4211                 }
4212
4213                 /* Check for valid Emulex Device ID */
4214                 switch (ptr->device) {
4215                 case PCI_DEVICE_ID_LANCER_FC:
4216                 case PCI_DEVICE_ID_LANCER_G6_FC:
4217                 case PCI_DEVICE_ID_LANCER_G7_FC:
4218                         break;
4219                 default:
4220                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4221                                         "8347 Invalid device found: "
4222                                         "0x%04x\n", ptr->device);
4223                         return -EBADSLT;
4224                 }
4225
4226                 /* Check for only one function 0 ID to ensure only one HBA on
4227                  * secondary bus
4228                  */
4229                 if (ptr->devfn == 0) {
4230                         if (++counter > 1) {
4231                                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4232                                                 "8348 More than one device on "
4233                                                 "secondary bus found\n");
4234                                 return -EBADSLT;
4235                         }
4236                 }
4237         }
4238
4239         return 0;
4240 }
4241
4242 /**
4243  * lpfc_info - Info entry point of scsi_host_template data structure
4244  * @host: The scsi host for which this call is being executed.
4245  *
4246  * This routine provides module information about hba.
4247  *
 * Return code:
4249  *   Pointer to char - Success.
4250  **/
4251 const char *
4252 lpfc_info(struct Scsi_Host *host)
4253 {
4254         struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4255         struct lpfc_hba   *phba = vport->phba;
4256         int link_speed = 0;
4257         static char lpfcinfobuf[384];
4258         char tmp[384] = {0};
4259
4260         memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
4261         if (phba && phba->pcidev){
4262                 /* Model Description */
4263                 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
4264                 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4265                     sizeof(lpfcinfobuf))
4266                         goto buffer_done;
4267
4268                 /* PCI Info */
4269                 scnprintf(tmp, sizeof(tmp),
4270                           " on PCI bus %02x device %02x irq %d",
4271                           phba->pcidev->bus->number, phba->pcidev->devfn,
4272                           phba->pcidev->irq);
4273                 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4274                     sizeof(lpfcinfobuf))
4275                         goto buffer_done;
4276
4277                 /* Port Number */
4278                 if (phba->Port[0]) {
4279                         scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
4280                         if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4281                             sizeof(lpfcinfobuf))
4282                                 goto buffer_done;
4283                 }
4284
4285                 /* Link Speed */
4286                 link_speed = lpfc_sli_port_speed_get(phba);
4287                 if (link_speed != 0) {
4288                         scnprintf(tmp, sizeof(tmp),
4289                                   " Logical Link Speed: %d Mbps", link_speed);
4290                         if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4291                             sizeof(lpfcinfobuf))
4292                                 goto buffer_done;
4293                 }
4294
4295                 /* PCI resettable */
4296                 if (!lpfc_check_pci_resettable(phba)) {
4297                         scnprintf(tmp, sizeof(tmp), " PCI resettable");
4298                         strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
4299                 }
4300         }
4301
4302 buffer_done:
4303         return lpfcinfobuf;
4304 }
4305
4306 /**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
4311  * The default value of cfg_poll_tmo is 10 milliseconds.
4312  **/
4313 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4314 {
4315         unsigned long  poll_tmo_expires =
4316                 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4317
4318         if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4319                 mod_timer(&phba->fcp_poll_timer,
4320                           poll_tmo_expires);
4321 }
4322
4323 /**
4324  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4325  * @phba: The Hba for which this call is being executed.
4326  *
4327  * This routine starts the fcp_poll_timer of @phba.
4328  **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	/* Starting and rearming share the same logic: the timer is armed
	 * only while commands are outstanding on the FCP ring.
	 */
	lpfc_poll_rearm_timer(phba);
}
4333
4334 /**
4335  * lpfc_poll_timeout - Restart polling timer
 * @t: Pointer to the timer_list embedded in the lpfc_hba structure.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
4340  **/
4341
4342 void lpfc_poll_timeout(struct timer_list *t)
4343 {
4344         struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
4345
4346         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4347                 lpfc_sli_handle_fast_ring_event(phba,
4348                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4349
4350                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4351                         lpfc_poll_rearm_timer(phba);
4352         }
4353 }
4354
4355 /**
4356  * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to the Scsi_Host that received the command.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from the scsi command and provides it to the
 * firmware. The midlayer's scsi_done callback is invoked after the driver
 * finishes processing the command.
4363  *
4364  * Return value :
4365  *   0 - Success
4366  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4367  **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* sanity check on references */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	/* Let the FC transport veto the command if the rport is blocked
	 * or being torn down; its verdict is returned in cmnd->result.
	 */
	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	/* Reject protected (DIF) I/O if BlockGuard was never enabled on
	 * this HBA (LPFC_SLI3_BG_ENABLED not set in sli3_options).
	 */
	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	/* Per-target queue depth enforcement: busy the command back to the
	 * midlayer once the node's pending count reaches its qdepth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
					 "3377 Target Queue Full, scsi Id:%d "
					 "Qdepth:%d Pending command:%d"
					 " WWNN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x, "
					 " WWPN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x",
					 ndlp->nlp_sid, ndlp->cmd_qdepth,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->nlp_nodename.u.wwn[0],
					 ndlp->nlp_nodename.u.wwn[1],
					 ndlp->nlp_nodename.u.wwn[2],
					 ndlp->nlp_nodename.u.wwn[3],
					 ndlp->nlp_nodename.u.wwn[4],
					 ndlp->nlp_nodename.u.wwn[5],
					 ndlp->nlp_nodename.u.wwn[6],
					 ndlp->nlp_nodename.u.wwn[7],
					 ndlp->nlp_portname.u.wwn[0],
					 ndlp->nlp_portname.u.wwn[1],
					 ndlp->nlp_portname.u.wwn[2],
					 ndlp->nlp_portname.u.wwn[3],
					 ndlp->nlp_portname.u.wwn[4],
					 ndlp->nlp_portname.u.wwn[5],
					 ndlp->nlp_portname.u.wwn[6],
					 ndlp->nlp_portname.u.wwn[7]);
			goto out_tgt_busy;
		}
	}

	/* Grab a driver scsi buffer to track this command; an empty pool
	 * means the HBA is saturated, so busy the host and ramp down.
	 */
	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->ndlp = ndlp;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	/* Map the data and (for protected I/O) protection buffers for DMA. */
	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	/* Build the FCP command/IOCB from the scsi command. */
	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	/* Hand the prepared IOCB to the SLI layer for submission. */
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		/* Decrement the per-direction FCP request counter selected
		 * by the command's data direction (fcpCntl3).
		 */
		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
		case WRITE_DATA:
			atomic_dec(&phba->fc4ScsiOutputRequests);
			break;
		case READ_DATA:
			atomic_dec(&phba->fc4ScsiInputRequests);
			break;
		default:
			atomic_dec(&phba->fc4ScsiControlRequests);
		}
		goto out_host_busy_free_buf;
	}
	/* In polled mode, reap completions now; if the ring interrupt is
	 * disabled, the poll timer must also be rearmed.
	 */
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	/* Unwind the DMA mapping and return the buffer before busying. */
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
4547
4548
4549 /**
4550  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4551  * @cmnd: Pointer to scsi_cmnd data structure.
4552  *
4553  * This routine aborts @cmnd pending in base driver.
4554  *
4555  * Return code :
4556  *   0x2003 - Error
4557  *   0x2002 - Success
4558  **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4 = NULL;
	int ret_val;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	/* Let the FC transport block until the rport is usable (or fail). */
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	/* host_scribble was set to the driver buffer in queuecommand;
	 * if it (or pCmd) is gone the I/O already completed.
	 */
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* SLI-4: locate the work-queue ring this I/O was issued on
	 * (LPFC_IO_FOF I/Os go via the oas_wq) and take its ring_lock
	 * in addition to hbalock.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (!(phba->cfg_fof) ||
		    (!(iocb->iocb_flag & LPFC_IO_FOF))) {
			pring_s4 =
				phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
		} else {
			iocb->hba_wqidx = 0;
			pring_s4 = phba->sli4_hba.oas_wq->pring;
		}
		if (!pring_s4) {
			ret = FAILED;
			goto out_unlock;
		}
		spin_lock(&pring_s4->ring_lock);
	}
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		goto out_unlock;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointig at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	/* Build the ABTS IOCB referencing the original I/O's tags. */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->hba_wqidx = iocb->hba_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	/* With the link down, ABTS cannot reach the target; close the
	 * exchange locally instead.
	 */
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	lpfc_cmd->waitq = &waitq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Note: both hbalock and ring_lock must be set here */
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock(&pring_s4->ring_lock);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);


	if (ret_val == IOCB_ERROR) {
		/* Issue failed: retake the appropriate lock and undo the
		 * LPFC_DRIVER_ABORTED marking before releasing the iocbq.
		 */
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_lock_irqsave(&pring_s4->ring_lock, flags);
		else
			spin_lock_irqsave(&phba->hbalock, flags);
		/* Indicate the IO is not being aborted by the driver. */
		iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		lpfc_cmd->waitq = NULL;
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock_irqrestore(&pring_s4->ring_lock, flags);
		else
			spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* pCmd still pointing at this command means completion never
	 * arrived within twice the devloss timeout — report failure.
	 */
	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}
4759
4760 static char *
4761 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4762 {
4763         switch (task_mgmt_cmd) {
4764         case FCP_ABORT_TASK_SET:
4765                 return "ABORT_TASK_SET";
4766         case FCP_CLEAR_TASK_SET:
4767                 return "FCP_CLEAR_TASK_SET";
4768         case FCP_BUS_RESET:
4769                 return "FCP_BUS_RESET";
4770         case FCP_LUN_RESET:
4771                 return "FCP_LUN_RESET";
4772         case FCP_TARGET_RESET:
4773                 return "FCP_TARGET_RESET";
4774         case FCP_CLEAR_ACA:
4775                 return "FCP_CLEAR_ACA";
4776         case FCP_TERMINATE_TASK:
4777                 return "FCP_TERMINATE_TASK";
4778         default:
4779                 return "unknown";
4780         }
4781 }
4782
4783
4784 /**
4785  * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4786  * @vport: The virtual port for which this call is being executed.
4787  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4788  *
 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeeded.
4790  *
4791  * Return code :
4792  *   0x2003 - Error
4793  *   0x2002 - Success
4794  **/
4795 static int
4796 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
4797 {
4798         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4799         uint32_t rsp_info;
4800         uint32_t rsp_len;
4801         uint8_t  rsp_info_code;
4802         int ret = FAILED;
4803
4804
4805         if (fcprsp == NULL)
4806                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4807                                  "0703 fcp_rsp is missing\n");
4808         else {
4809                 rsp_info = fcprsp->rspStatus2;
4810                 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4811                 rsp_info_code = fcprsp->rspInfo3;
4812
4813
4814                 lpfc_printf_vlog(vport, KERN_INFO,
4815                                  LOG_FCP,
4816                                  "0706 fcp_rsp valid 0x%x,"
4817                                  " rsp len=%d code 0x%x\n",
4818                                  rsp_info,
4819                                  rsp_len, rsp_info_code);
4820
4821                 if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
4822                         switch (rsp_info_code) {
4823                         case RSP_NO_FAILURE:
4824                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4825                                                  "0715 Task Mgmt No Failure\n");
4826                                 ret = SUCCESS;
4827                                 break;
4828                         case RSP_TM_NOT_SUPPORTED: /* TM rejected */
4829                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4830                                                  "0716 Task Mgmt Target "
4831                                                 "reject\n");
4832                                 break;
4833                         case RSP_TM_NOT_COMPLETED: /* TM failed */
4834                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4835                                                  "0717 Task Mgmt Target "
4836                                                 "failed TM\n");
4837                                 break;
4838                         case RSP_TM_INVALID_LU: /* TM to invalid LU! */
4839                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4840                                                  "0718 Task Mgmt to invalid "
4841                                                 "LUN\n");
4842                                 break;
4843                         }
4844                 }
4845         }
4846         return ret;
4847 }
4848
4849
4850 /**
4851  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
4852  * @vport: The virtual port for which this call is being executed.
4853  * @rdata: Pointer to remote port local data
4854  * @tgt_id: Target ID of remote device.
4855  * @lun_id: Lun number for the TMF
4856  * @task_mgmt_cmd: type of TMF to send
4857  *
4858  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
4859  * a remote port.
4860  *
4861  * Return Code:
4862  *   0x2003 - Error
4863  *   0x2002 - Success.
4864  **/
4865 static int
4866 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
4867                    unsigned int tgt_id, uint64_t lun_id,
4868                    uint8_t task_mgmt_cmd)
4869 {
4870         struct lpfc_hba   *phba = vport->phba;
4871         struct lpfc_scsi_buf *lpfc_cmd;
4872         struct lpfc_iocbq *iocbq;
4873         struct lpfc_iocbq *iocbqrsp;
4874         struct lpfc_rport_data *rdata;
4875         struct lpfc_nodelist *pnode;
4876         int ret;
4877         int status;
4878
4879         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4880         if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
4881                 return FAILED;
4882         pnode = rdata->pnode;
4883
4884         lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
4885         if (lpfc_cmd == NULL)
4886                 return FAILED;
4887         lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
4888         lpfc_cmd->rdata = rdata;
4889         lpfc_cmd->pCmd = cmnd;
4890         lpfc_cmd->ndlp = pnode;
4891
4892         status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
4893                                            task_mgmt_cmd);
4894         if (!status) {
4895                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4896                 return FAILED;
4897         }
4898
4899         iocbq = &lpfc_cmd->cur_iocbq;
4900         iocbqrsp = lpfc_sli_get_iocbq(phba);
4901         if (iocbqrsp == NULL) {
4902                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4903                 return FAILED;
4904         }
4905         iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
4906
4907         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4908                          "0702 Issue %s to TGT %d LUN %llu "
4909                          "rpi x%x nlp_flag x%x Data: x%x x%x\n",
4910                          lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
4911                          pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
4912                          iocbq->iocb_flag);
4913
4914         status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
4915                                           iocbq, iocbqrsp, lpfc_cmd->timeout);
4916         if ((status != IOCB_SUCCESS) ||
4917             (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
4918                 if (status != IOCB_SUCCESS ||
4919                     iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
4920                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4921                                          "0727 TMF %s to TGT %d LUN %llu "
4922                                          "failed (%d, %d) iocb_flag x%x\n",
4923                                          lpfc_taskmgmt_name(task_mgmt_cmd),
4924                                          tgt_id, lun_id,
4925                                          iocbqrsp->iocb.ulpStatus,
4926                                          iocbqrsp->iocb.un.ulpWord[4],
4927                                          iocbq->iocb_flag);
4928                 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
4929                 if (status == IOCB_SUCCESS) {
4930                         if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
4931                                 /* Something in the FCP_RSP was invalid.
4932                                  * Check conditions */
4933                                 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
4934                         else
4935                                 ret = FAILED;
4936                 } else if (status == IOCB_TIMEDOUT) {
4937                         ret = TIMEOUT_ERROR;
4938                 } else {
4939                         ret = FAILED;
4940                 }
4941         } else
4942                 ret = SUCCESS;
4943
4944         lpfc_sli_release_iocbq(phba, iocbqrsp);
4945
4946         if (ret != TIMEOUT_ERROR)
4947                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4948
4949         return ret;
4950 }
4951
4952 /**
4953  * lpfc_chk_tgt_mapped -
4954  * @vport: The virtual port to check on
4955  * @cmnd: Pointer to scsi_cmnd data structure.
4956  *
4957  * This routine delays until the scsi target (aka rport) for the
4958  * command exists (is present and logged in) or we declare it non-existent.
4959  *
4960  * Return code :
4961  *  0x2003 - Error
4962  *  0x2002 - Success
4963  **/
4964 static int
4965 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
4966 {
4967         struct lpfc_rport_data *rdata;
4968         struct lpfc_nodelist *pnode;
4969         unsigned long later;
4970
4971         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4972         if (!rdata) {
4973                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4974                         "0797 Tgt Map rport failure: rdata x%p\n", rdata);
4975                 return FAILED;
4976         }
4977         pnode = rdata->pnode;
4978         /*
4979          * If target is not in a MAPPED state, delay until
4980          * target is rediscovered or devloss timeout expires.
4981          */
4982         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4983         while (time_after(later, jiffies)) {
4984                 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4985                         return FAILED;
4986                 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
4987                         return SUCCESS;
4988                 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
4989                 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4990                 if (!rdata)
4991                         return FAILED;
4992                 pnode = rdata->pnode;
4993         }
4994         if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
4995             (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4996                 return FAILED;
4997         return SUCCESS;
4998 }
4999
5000 /**
5001  * lpfc_reset_flush_io_context -
5002  * @vport: The virtual port (scsi_host) for the flush context
5003  * @tgt_id: If aborting by Target contect - specifies the target id
5004  * @lun_id: If aborting by Lun context - specifies the lun id
5005  * @context: specifies the context level to flush at.
5006  *
5007  * After a reset condition via TMF, we need to flush orphaned i/o
5008  * contexts from the adapter. This routine aborts any contexts
5009  * outstanding, then waits for their completions. The wait is
5010  * bounded by devloss_tmo though.
5011  *
5012  * Return code :
5013  *  0x2003 - Error
5014  *  0x2002 - Success
5015  **/
5016 static int
5017 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5018                         uint64_t lun_id, lpfc_ctx_cmd context)
5019 {
5020         struct lpfc_hba   *phba = vport->phba;
5021         unsigned long later;
5022         int cnt;
5023
5024         cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5025         if (cnt)
5026                 lpfc_sli_abort_taskmgmt(vport,
5027                                         &phba->sli.sli3_ring[LPFC_FCP_RING],
5028                                         tgt_id, lun_id, context);
5029         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5030         while (time_after(later, jiffies) && cnt) {
5031                 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5032                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5033         }
5034         if (cnt) {
5035                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5036                         "0724 I/O flush failure for context %s : cnt x%x\n",
5037                         ((context == LPFC_CTX_LUN) ? "LUN" :
5038                          ((context == LPFC_CTX_TGT) ? "TGT" :
5039                           ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5040                         cnt);
5041                 return FAILED;
5042         }
5043         return SUCCESS;
5044 }
5045
5046 /**
5047  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5048  * @cmnd: Pointer to scsi_cmnd data structure.
5049  *
5050  * This routine does a device reset by sending a LUN_RESET task management
5051  * command.
5052  *
5053  * Return code :
5054  *  0x2003 - Error
5055  *  0x2002 - Success
5056  **/
5057 static int
5058 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5059 {
5060         struct Scsi_Host  *shost = cmnd->device->host;
5061         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5062         struct lpfc_rport_data *rdata;
5063         struct lpfc_nodelist *pnode;
5064         unsigned tgt_id = cmnd->device->id;
5065         uint64_t lun_id = cmnd->device->lun;
5066         struct lpfc_scsi_event_header scsi_event;
5067         int status;
5068
5069         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5070         if (!rdata || !rdata->pnode) {
5071                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5072                                  "0798 Device Reset rport failure: rdata x%p\n",
5073                                  rdata);
5074                 return FAILED;
5075         }
5076         pnode = rdata->pnode;
5077         status = fc_block_scsi_eh(cmnd);
5078         if (status != 0 && status != SUCCESS)
5079                 return status;
5080
5081         status = lpfc_chk_tgt_mapped(vport, cmnd);
5082         if (status == FAILED) {
5083                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5084                         "0721 Device Reset rport failure: rdata x%p\n", rdata);
5085                 return FAILED;
5086         }
5087
5088         scsi_event.event_type = FC_REG_SCSI_EVENT;
5089         scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5090         scsi_event.lun = lun_id;
5091         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5092         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5093
5094         fc_host_post_vendor_event(shost, fc_get_event_number(),
5095                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5096
5097         status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5098                                                 FCP_LUN_RESET);
5099
5100         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5101                          "0713 SCSI layer issued Device Reset (%d, %llu) "
5102                          "return x%x\n", tgt_id, lun_id, status);
5103
5104         /*
5105          * We have to clean up i/o as : they may be orphaned by the TMF;
5106          * or if the TMF failed, they may be in an indeterminate state.
5107          * So, continue on.
5108          * We will report success if all the i/o aborts successfully.
5109          */
5110         if (status == SUCCESS)
5111                 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5112                                                 LPFC_CTX_LUN);
5113
5114         return status;
5115 }
5116
5117 /**
5118  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5119  * @cmnd: Pointer to scsi_cmnd data structure.
5120  *
5121  * This routine does a target reset by sending a TARGET_RESET task management
5122  * command.
5123  *
5124  * Return code :
5125  *  0x2003 - Error
5126  *  0x2002 - Success
5127  **/
5128 static int
5129 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5130 {
5131         struct Scsi_Host  *shost = cmnd->device->host;
5132         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5133         struct lpfc_rport_data *rdata;
5134         struct lpfc_nodelist *pnode;
5135         unsigned tgt_id = cmnd->device->id;
5136         uint64_t lun_id = cmnd->device->lun;
5137         struct lpfc_scsi_event_header scsi_event;
5138         int status;
5139
5140         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5141         if (!rdata) {
5142                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5143                         "0799 Target Reset rport failure: rdata x%p\n", rdata);
5144                 return FAILED;
5145         }
5146         pnode = rdata->pnode;
5147         status = fc_block_scsi_eh(cmnd);
5148         if (status != 0 && status != SUCCESS)
5149                 return status;
5150
5151         status = lpfc_chk_tgt_mapped(vport, cmnd);
5152         if (status == FAILED) {
5153                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5154                         "0722 Target Reset rport failure: rdata x%p\n", rdata);
5155                 if (pnode) {
5156                         spin_lock_irq(shost->host_lock);
5157                         pnode->nlp_flag &= ~NLP_NPR_ADISC;
5158                         pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5159                         spin_unlock_irq(shost->host_lock);
5160                 }
5161                 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5162                                           LPFC_CTX_TGT);
5163                 return FAST_IO_FAIL;
5164         }
5165
5166         scsi_event.event_type = FC_REG_SCSI_EVENT;
5167         scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5168         scsi_event.lun = 0;
5169         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5170         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5171
5172         fc_host_post_vendor_event(shost, fc_get_event_number(),
5173                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5174
5175         status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5176                                         FCP_TARGET_RESET);
5177
5178         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5179                          "0723 SCSI layer issued Target Reset (%d, %llu) "
5180                          "return x%x\n", tgt_id, lun_id, status);
5181
5182         /*
5183          * We have to clean up i/o as : they may be orphaned by the TMF;
5184          * or if the TMF failed, they may be in an indeterminate state.
5185          * So, continue on.
5186          * We will report success if all the i/o aborts successfully.
5187          */
5188         if (status == SUCCESS)
5189                 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5190                                           LPFC_CTX_TGT);
5191         return status;
5192 }
5193
5194 /**
5195  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5196  * @cmnd: Pointer to scsi_cmnd data structure.
5197  *
5198  * This routine does target reset to all targets on @cmnd->device->host.
5199  * This emulates Parallel SCSI Bus Reset Semantics.
5200  *
5201  * Return code :
5202  *  0x2003 - Error
5203  *  0x2002 - Success
5204  **/
5205 static int
5206 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5207 {
5208         struct Scsi_Host  *shost = cmnd->device->host;
5209         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5210         struct lpfc_nodelist *ndlp = NULL;
5211         struct lpfc_scsi_event_header scsi_event;
5212         int match;
5213         int ret = SUCCESS, status, i;
5214
5215         scsi_event.event_type = FC_REG_SCSI_EVENT;
5216         scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5217         scsi_event.lun = 0;
5218         memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5219         memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5220
5221         fc_host_post_vendor_event(shost, fc_get_event_number(),
5222                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5223
5224         status = fc_block_scsi_eh(cmnd);
5225         if (status != 0 && status != SUCCESS)
5226                 return status;
5227
5228         /*
5229          * Since the driver manages a single bus device, reset all
5230          * targets known to the driver.  Should any target reset
5231          * fail, this routine returns failure to the midlayer.
5232          */
5233         for (i = 0; i < LPFC_MAX_TARGET; i++) {
5234                 /* Search for mapped node by target ID */
5235                 match = 0;
5236                 spin_lock_irq(shost->host_lock);
5237                 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5238                         if (!NLP_CHK_NODE_ACT(ndlp))
5239                                 continue;
5240                         if (vport->phba->cfg_fcp2_no_tgt_reset &&
5241                             (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5242                                 continue;
5243                         if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5244                             ndlp->nlp_sid == i &&
5245                             ndlp->rport &&
5246                             ndlp->nlp_type & NLP_FCP_TARGET) {
5247                                 match = 1;
5248                                 break;
5249                         }
5250                 }
5251                 spin_unlock_irq(shost->host_lock);
5252                 if (!match)
5253                         continue;
5254
5255                 status = lpfc_send_taskmgmt(vport, cmnd,
5256                                         i, 0, FCP_TARGET_RESET);
5257
5258                 if (status != SUCCESS) {
5259                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5260                                          "0700 Bus Reset on target %d failed\n",
5261                                          i);
5262                         ret = FAILED;
5263                 }
5264         }
5265         /*
5266          * We have to clean up i/o as : they may be orphaned by the TMFs
5267          * above; or if any of the TMFs failed, they may be in an
5268          * indeterminate state.
5269          * We will report success if all the i/o aborts successfully.
5270          */
5271
5272         status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5273         if (status != SUCCESS)
5274                 ret = FAILED;
5275
5276         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5277                          "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5278         return ret;
5279 }
5280
5281 /**
5282  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5283  * @cmnd: Pointer to scsi_cmnd data structure.
5284  *
5285  * This routine does host reset to the adaptor port. It brings the HBA
5286  * offline, performs a board restart, and then brings the board back online.
5287  * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
5288  * reject all outstanding SCSI commands to the host and error returned
5289  * back to SCSI mid-level. As this will be SCSI mid-level's last resort
5290  * of error handling, it will only return error if resetting of the adapter
5291  * is not successful; in all other cases, will return success.
5292  *
5293  * Return code :
5294  *  0x2003 - Error
5295  *  0x2002 - Success
5296  **/
5297 static int
5298 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5299 {
5300         struct Scsi_Host *shost = cmnd->device->host;
5301         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5302         struct lpfc_hba *phba = vport->phba;
5303         int rc, ret = SUCCESS;
5304
5305         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5306                          "3172 SCSI layer issued Host Reset Data:\n");
5307
5308         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5309         lpfc_offline(phba);
5310         rc = lpfc_sli_brdrestart(phba);
5311         if (rc)
5312                 ret = FAILED;
5313         rc = lpfc_online(phba);
5314         if (rc)
5315                 ret = FAILED;
5316         lpfc_unblock_mgmt_io(phba);
5317
5318         if (ret == FAILED) {
5319                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5320                                  "3323 Failed host reset, bring it offline\n");
5321                 lpfc_sli4_offline_eratt(phba);
5322         }
5323         return ret;
5324 }
5325
5326 /**
5327  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5328  * @sdev: Pointer to scsi_device.
5329  *
5330  * This routine populates the cmds_per_lun count + 2 scsi_bufs into  this host's
5331  * globally available list of scsi buffers. This routine also makes sure scsi
5332  * buffer is not allocated more than HBA limit conveyed to midlayer. This list
5333  * of scsi buffer exists for the lifetime of the driver.
5334  *
5335  * Return codes:
5336  *   non-0 - Error
5337  *   0 - Success
5338  **/
5339 static int
5340 lpfc_slave_alloc(struct scsi_device *sdev)
5341 {
5342         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5343         struct lpfc_hba   *phba = vport->phba;
5344         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5345         uint32_t total = 0;
5346         uint32_t num_to_alloc = 0;
5347         int num_allocated = 0;
5348         uint32_t sdev_cnt;
5349         struct lpfc_device_data *device_data;
5350         unsigned long flags;
5351         struct lpfc_name target_wwpn;
5352
5353         if (!rport || fc_remote_port_chkready(rport))
5354                 return -ENXIO;
5355
5356         if (phba->cfg_fof) {
5357
5358                 /*
5359                  * Check to see if the device data structure for the lun
5360                  * exists.  If not, create one.
5361                  */
5362
5363                 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5364                 spin_lock_irqsave(&phba->devicelock, flags);
5365                 device_data = __lpfc_get_device_data(phba,
5366                                                      &phba->luns,
5367                                                      &vport->fc_portname,
5368                                                      &target_wwpn,
5369                                                      sdev->lun);
5370                 if (!device_data) {
5371                         spin_unlock_irqrestore(&phba->devicelock, flags);
5372                         device_data = lpfc_create_device_data(phba,
5373                                                         &vport->fc_portname,
5374                                                         &target_wwpn,
5375                                                         sdev->lun,
5376                                                         phba->cfg_XLanePriority,
5377                                                         true);
5378                         if (!device_data)
5379                                 return -ENOMEM;
5380                         spin_lock_irqsave(&phba->devicelock, flags);
5381                         list_add_tail(&device_data->listentry, &phba->luns);
5382                 }
5383                 device_data->rport_data = rport->dd_data;
5384                 device_data->available = true;
5385                 spin_unlock_irqrestore(&phba->devicelock, flags);
5386                 sdev->hostdata = device_data;
5387         } else {
5388                 sdev->hostdata = rport->dd_data;
5389         }
5390         sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5391
5392         /* For SLI4, all IO buffers are pre-allocated */
5393         if (phba->sli_rev == LPFC_SLI_REV4)
5394                 return 0;
5395
5396         /* This code path is now ONLY for SLI3 adapters */
5397
5398         /*
5399          * Populate the cmds_per_lun count scsi_bufs into this host's globally
5400          * available list of scsi buffers.  Don't allocate more than the
5401          * HBA limit conveyed to the midlayer via the host structure.  The
5402          * formula accounts for the lun_queue_depth + error handlers + 1
5403          * extra.  This list of scsi bufs exists for the lifetime of the driver.
5404          */
5405         total = phba->total_scsi_bufs;
5406         num_to_alloc = vport->cfg_lun_queue_depth + 2;
5407
5408         /* If allocated buffers are enough do nothing */
5409         if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5410                 return 0;
5411
5412         /* Allow some exchanges to be available always to complete discovery */
5413         if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5414                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5415                                  "0704 At limitation of %d preallocated "
5416                                  "command buffers\n", total);
5417                 return 0;
5418         /* Allow some exchanges to be available always to complete discovery */
5419         } else if (total + num_to_alloc >
5420                 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5421                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5422                                  "0705 Allocation request of %d "
5423                                  "command buffers will exceed max of %d.  "
5424                                  "Reducing allocation request to %d.\n",
5425                                  num_to_alloc, phba->cfg_hba_queue_depth,
5426                                  (phba->cfg_hba_queue_depth - total));
5427                 num_to_alloc = phba->cfg_hba_queue_depth - total;
5428         }
5429         num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
5430         if (num_to_alloc != num_allocated) {
5431                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5432                                          "0708 Allocation request of %d "
5433                                          "command buffers did not succeed.  "
5434                                          "Allocated %d buffers.\n",
5435                                          num_to_alloc, num_allocated);
5436         }
5437         if (num_allocated > 0)
5438                 phba->total_scsi_bufs += num_allocated;
5439         return 0;
5440 }
5441
5442 /**
5443  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5444  * @sdev: Pointer to scsi_device.
5445  *
5446  * This routine configures following items
5447  *   - Tag command queuing support for @sdev if supported.
5448  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5449  *
5450  * Return codes:
5451  *   0 - Success
5452  **/
5453 static int
5454 lpfc_slave_configure(struct scsi_device *sdev)
5455 {
5456         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5457         struct lpfc_hba   *phba = vport->phba;
5458
5459         scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5460
5461         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5462                 lpfc_sli_handle_fast_ring_event(phba,
5463                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5464                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5465                         lpfc_poll_rearm_timer(phba);
5466         }
5467
5468         return 0;
5469 }
5470
5471 /**
5472  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5473  * @sdev: Pointer to scsi_device.
5474  *
5475  * This routine sets @sdev hostatdata filed to null.
5476  **/
5477 static void
5478 lpfc_slave_destroy(struct scsi_device *sdev)
5479 {
5480         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5481         struct lpfc_hba   *phba = vport->phba;
5482         unsigned long flags;
5483         struct lpfc_device_data *device_data = sdev->hostdata;
5484
5485         atomic_dec(&phba->sdev_cnt);
5486         if ((phba->cfg_fof) && (device_data)) {
5487                 spin_lock_irqsave(&phba->devicelock, flags);
5488                 device_data->available = false;
5489                 if (!device_data->oas_enabled)
5490                         lpfc_delete_device_data(phba, device_data);
5491                 spin_unlock_irqrestore(&phba->devicelock, flags);
5492         }
5493         sdev->hostdata = NULL;
5494         return;
5495 }
5496
5497 /**
5498  * lpfc_create_device_data - creates and initializes device data structure for OAS
5499  * @pha: Pointer to host bus adapter structure.
5500  * @vport_wwpn: Pointer to vport's wwpn information
5501  * @target_wwpn: Pointer to target's wwpn information
5502  * @lun: Lun on target
5503  * @atomic_create: Flag to indicate if memory should be allocated using the
5504  *                GFP_ATOMIC flag or not.
5505  *
5506  * This routine creates a device data structure which will contain identifying
5507  * information for the device (host wwpn, target wwpn, lun), state of OAS,
5508  * whether or not the corresponding lun is available by the system,
5509  * and pointer to the rport data.
5510  *
5511  * Return codes:
5512  *   NULL - Error
5513  *   Pointer to lpfc_device_data - Success
5514  **/
5515 struct lpfc_device_data*
5516 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5517                         struct lpfc_name *target_wwpn, uint64_t lun,
5518                         uint32_t pri, bool atomic_create)
5519 {
5520
5521         struct lpfc_device_data *lun_info;
5522         int memory_flags;
5523
5524         if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
5525             !(phba->cfg_fof))
5526                 return NULL;
5527
5528         /* Attempt to create the device data to contain lun info */
5529
5530         if (atomic_create)
5531                 memory_flags = GFP_ATOMIC;
5532         else
5533                 memory_flags = GFP_KERNEL;
5534         lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5535         if (!lun_info)
5536                 return NULL;
5537         INIT_LIST_HEAD(&lun_info->listentry);
5538         lun_info->rport_data  = NULL;
5539         memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5540                sizeof(struct lpfc_name));
5541         memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5542                sizeof(struct lpfc_name));
5543         lun_info->device_id.lun = lun;
5544         lun_info->oas_enabled = false;
5545         lun_info->priority = pri;
5546         lun_info->available = false;
5547         return lun_info;
5548 }
5549
5550 /**
5551  * lpfc_delete_device_data - frees a device data structure for OAS
5552  * @pha: Pointer to host bus adapter structure.
5553  * @lun_info: Pointer to device data structure to free.
5554  *
5555  * This routine frees the previously allocated device data structure passed.
5556  *
5557  **/
5558 void
5559 lpfc_delete_device_data(struct lpfc_hba *phba,
5560                         struct lpfc_device_data *lun_info)
5561 {
5562
5563         if (unlikely(!phba) || !lun_info  ||
5564             !(phba->cfg_fof))
5565                 return;
5566
5567         if (!list_empty(&lun_info->listentry))
5568                 list_del(&lun_info->listentry);
5569         mempool_free(lun_info, phba->device_data_mem_pool);
5570         return;
5571 }
5572
5573 /**
5574  * __lpfc_get_device_data - returns the device data for the specified lun
5575  * @pha: Pointer to host bus adapter structure.
5576  * @list: Point to list to search.
5577  * @vport_wwpn: Pointer to vport's wwpn information
5578  * @target_wwpn: Pointer to target's wwpn information
5579  * @lun: Lun on target
5580  *
5581  * This routine searches the list passed for the specified lun's device data.
5582  * This function does not hold locks, it is the responsibility of the caller
5583  * to ensure the proper lock is held before calling the function.
5584  *
5585  * Return codes:
5586  *   NULL - Error
5587  *   Pointer to lpfc_device_data - Success
5588  **/
5589 struct lpfc_device_data*
5590 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5591                        struct lpfc_name *vport_wwpn,
5592                        struct lpfc_name *target_wwpn, uint64_t lun)
5593 {
5594
5595         struct lpfc_device_data *lun_info;
5596
5597         if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5598             !phba->cfg_fof)
5599                 return NULL;
5600
5601         /* Check to see if the lun is already enabled for OAS. */
5602
5603         list_for_each_entry(lun_info, list, listentry) {
5604                 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5605                             sizeof(struct lpfc_name)) == 0) &&
5606                     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5607                             sizeof(struct lpfc_name)) == 0) &&
5608                     (lun_info->device_id.lun == lun))
5609                         return lun_info;
5610         }
5611
5612         return NULL;
5613 }
5614
5615 /**
5616  * lpfc_find_next_oas_lun - searches for the next oas lun
5617  * @pha: Pointer to host bus adapter structure.
5618  * @vport_wwpn: Pointer to vport's wwpn information
5619  * @target_wwpn: Pointer to target's wwpn information
5620  * @starting_lun: Pointer to the lun to start searching for
5621  * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5622  * @found_target_wwpn: Pointer to the found lun's target wwpn information
5623  * @found_lun: Pointer to the found lun.
5624  * @found_lun_status: Pointer to status of the found lun.
5625  *
5626  * This routine searches the luns list for the specified lun
5627  * or the first lun for the vport/target.  If the vport wwpn contains
5628  * a zero value then a specific vport is not specified. In this case
5629  * any vport which contains the lun will be considered a match.  If the
5630  * target wwpn contains a zero value then a specific target is not specified.
5631  * In this case any target which contains the lun will be considered a
5632  * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
5633  * are returned.  The function will also return the next lun if available.
5634  * If the next lun is not found, starting_lun parameter will be set to
5635  * NO_MORE_OAS_LUN.
5636  *
5637  * Return codes:
5638  *   non-0 - Error
5639  *   0 - Success
5640  **/
5641 bool
5642 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5643                        struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5644                        struct lpfc_name *found_vport_wwpn,
5645                        struct lpfc_name *found_target_wwpn,
5646                        uint64_t *found_lun,
5647                        uint32_t *found_lun_status,
5648                        uint32_t *found_lun_pri)
5649 {
5650
5651         unsigned long flags;
5652         struct lpfc_device_data *lun_info;
5653         struct lpfc_device_id *device_id;
5654         uint64_t lun;
5655         bool found = false;
5656
5657         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5658             !starting_lun || !found_vport_wwpn ||
5659             !found_target_wwpn || !found_lun || !found_lun_status ||
5660             (*starting_lun == NO_MORE_OAS_LUN) ||
5661             !phba->cfg_fof)
5662                 return false;
5663
5664         lun = *starting_lun;
5665         *found_lun = NO_MORE_OAS_LUN;
5666         *starting_lun = NO_MORE_OAS_LUN;
5667
5668         /* Search for lun or the lun closet in value */
5669
5670         spin_lock_irqsave(&phba->devicelock, flags);
5671         list_for_each_entry(lun_info, &phba->luns, listentry) {
5672                 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5673                      (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5674                             sizeof(struct lpfc_name)) == 0)) &&
5675                     ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5676                      (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5677                             sizeof(struct lpfc_name)) == 0)) &&
5678                     (lun_info->oas_enabled)) {
5679                         device_id = &lun_info->device_id;
5680                         if ((!found) &&
5681                             ((lun == FIND_FIRST_OAS_LUN) ||
5682                              (device_id->lun == lun))) {
5683                                 *found_lun = device_id->lun;
5684                                 memcpy(found_vport_wwpn,
5685                                        &device_id->vport_wwpn,
5686                                        sizeof(struct lpfc_name));
5687                                 memcpy(found_target_wwpn,
5688                                        &device_id->target_wwpn,
5689                                        sizeof(struct lpfc_name));
5690                                 if (lun_info->available)
5691                                         *found_lun_status =
5692                                                 OAS_LUN_STATUS_EXISTS;
5693                                 else
5694                                         *found_lun_status = 0;
5695                                 *found_lun_pri = lun_info->priority;
5696                                 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5697                                         memset(vport_wwpn, 0x0,
5698                                                sizeof(struct lpfc_name));
5699                                 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5700                                         memset(target_wwpn, 0x0,
5701                                                sizeof(struct lpfc_name));
5702                                 found = true;
5703                         } else if (found) {
5704                                 *starting_lun = device_id->lun;
5705                                 memcpy(vport_wwpn, &device_id->vport_wwpn,
5706                                        sizeof(struct lpfc_name));
5707                                 memcpy(target_wwpn, &device_id->target_wwpn,
5708                                        sizeof(struct lpfc_name));
5709                                 break;
5710                         }
5711                 }
5712         }
5713         spin_unlock_irqrestore(&phba->devicelock, flags);
5714         return found;
5715 }
5716
5717 /**
5718  * lpfc_enable_oas_lun - enables a lun for OAS operations
5719  * @pha: Pointer to host bus adapter structure.
5720  * @vport_wwpn: Pointer to vport's wwpn information
5721  * @target_wwpn: Pointer to target's wwpn information
5722  * @lun: Lun
5723  *
5724  * This routine enables a lun for oas operations.  The routines does so by
5725  * doing the following :
5726  *
5727  *   1) Checks to see if the device data for the lun has been created.
5728  *   2) If found, sets the OAS enabled flag if not set and returns.
5729  *   3) Otherwise, creates a device data structure.
5730  *   4) If successfully created, indicates the device data is for an OAS lun,
5731  *   indicates the lun is not available and add to the list of luns.
5732  *
5733  * Return codes:
5734  *   false - Error
5735  *   true - Success
5736  **/
5737 bool
5738 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5739                     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5740 {
5741
5742         struct lpfc_device_data *lun_info;
5743         unsigned long flags;
5744
5745         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5746             !phba->cfg_fof)
5747                 return false;
5748
5749         spin_lock_irqsave(&phba->devicelock, flags);
5750
5751         /* Check to see if the device data for the lun has been created */
5752         lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5753                                           target_wwpn, lun);
5754         if (lun_info) {
5755                 if (!lun_info->oas_enabled)
5756                         lun_info->oas_enabled = true;
5757                 lun_info->priority = pri;
5758                 spin_unlock_irqrestore(&phba->devicelock, flags);
5759                 return true;
5760         }
5761
5762         /* Create an lun info structure and add to list of luns */
5763         lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5764                                            pri, false);
5765         if (lun_info) {
5766                 lun_info->oas_enabled = true;
5767                 lun_info->priority = pri;
5768                 lun_info->available = false;
5769                 list_add_tail(&lun_info->listentry, &phba->luns);
5770                 spin_unlock_irqrestore(&phba->devicelock, flags);
5771                 return true;
5772         }
5773         spin_unlock_irqrestore(&phba->devicelock, flags);
5774         return false;
5775 }
5776
5777 /**
5778  * lpfc_disable_oas_lun - disables a lun for OAS operations
5779  * @pha: Pointer to host bus adapter structure.
5780  * @vport_wwpn: Pointer to vport's wwpn information
5781  * @target_wwpn: Pointer to target's wwpn information
5782  * @lun: Lun
5783  *
5784  * This routine disables a lun for oas operations.  The routines does so by
5785  * doing the following :
5786  *
5787  *   1) Checks to see if the device data for the lun is created.
5788  *   2) If present, clears the flag indicating this lun is for OAS.
5789  *   3) If the lun is not available by the system, the device data is
5790  *   freed.
5791  *
5792  * Return codes:
5793  *   false - Error
5794  *   true - Success
5795  **/
5796 bool
5797 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5798                      struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5799 {
5800
5801         struct lpfc_device_data *lun_info;
5802         unsigned long flags;
5803
5804         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5805             !phba->cfg_fof)
5806                 return false;
5807
5808         spin_lock_irqsave(&phba->devicelock, flags);
5809
5810         /* Check to see if the lun is available. */
5811         lun_info = __lpfc_get_device_data(phba,
5812                                           &phba->luns, vport_wwpn,
5813                                           target_wwpn, lun);
5814         if (lun_info) {
5815                 lun_info->oas_enabled = false;
5816                 lun_info->priority = pri;
5817                 if (!lun_info->available)
5818                         lpfc_delete_device_data(phba, lun_info);
5819                 spin_unlock_irqrestore(&phba->devicelock, flags);
5820                 return true;
5821         }
5822
5823         spin_unlock_irqrestore(&phba->devicelock, flags);
5824         return false;
5825 }
5826
/* Stub .queuecommand handler: always pushes the command back to the
 * midlayer with a host-busy status so it is never serviced here.
 */
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}
5832
/* Stub error-handler entry point: every abort/reset request fails. */
static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}
5838
/* Stub slave_alloc/slave_configure: rejects every SCSI device. */
static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}
5844
/*
 * SCSI host template whose command and error-handling entry points are
 * all stubs (lpfc_no_command / lpfc_no_handler / lpfc_no_slave): the
 * midlayer can register the host, but no SCSI I/O or resets succeed.
 * NOTE(review): the name suggests it is selected when the port runs
 * NVME only — confirm against the code that picks the template.
 */
struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler	= lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,
	.cmd_per_lun		= 1,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};
5867
/*
 * Fully functional SCSI host template; identical to lpfc_template except
 * that no .eh_host_reset_handler is wired up ("no_hr" — presumably
 * "no host reset"; confirm with the code selecting this template).
 */
struct scsi_host_template lpfc_template_no_hr = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
5892
/*
 * Default full-featured SCSI host template: real queuecommand plus the
 * complete error-handler set, including eh_host_reset_handler.
 */
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler	= lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
5918
/*
 * SCSI host template for vports: like lpfc_template but without bus or
 * host reset handlers, using lpfc_vport_attrs and no vendor_id.
 */
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};