/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 *
 *  This software is available under the terms of the GNU General Public License
 *  (GPL) Version 2, available from the file COPYING in the main directory of
 *  this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

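/*
 * Arm the per-command timeout; qedf_cmd_timeout() runs if the request is
 * still outstanding after timer_msec milliseconds.
 */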
void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        unsigned int timer_msec)
{
        queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
            msecs_to_jiffies(timer_msec));
}

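/*
 * Delayed-work handler for command timeouts.  Cleans up the timed-out task
 * based on its command type (ABTS, ELS or sequence cleanup) and completes
 * any waiters.
 */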
static void qedf_cmd_timeout(struct work_struct *work)
{
        struct qedf_ioreq *io_req =
            container_of(work, struct qedf_ioreq, timeout_work.work);
        struct qedf_ctx *qedf;
        struct qedf_rport *fcport;
        u8 op = 0;

        if (io_req == NULL) {
                QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
                return;
        }

        fcport = io_req->fcport;
        if (io_req->fcport == NULL) {
                QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
                return;
        }

        qedf = fcport->qedf;

        switch (io_req->cmd_type) {
        case QEDF_ABTS:
                if (qedf == NULL) {
                        QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
                            io_req->xid);
                        return;
                }

                QEDF_ERR(&(qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
                    io_req->xid);
                /* Cleanup timed out ABTS */
                qedf_initiate_cleanup(io_req, true);
                complete(&io_req->abts_done);

                /*
                 * Need to call kref_put for reference taken when initiate_abts
                 * was called since abts_compl won't be called now that we've
                 * cleaned up the task.
                 */
                kref_put(&io_req->refcount, qedf_release_cmd);

                /*
                 * Now that the original I/O and the ABTS are complete see
                 * if we need to reconnect to the target.
                 */
                qedf_restart_rport(fcport);
                break;
        case QEDF_ELS:
                kref_get(&io_req->refcount);
                /*
                 * Don't attempt to clean an ELS timeout as any subsequent
                 * ABTS or cleanup requests just hang.  For now just free
                 * the resources of the original I/O and the RRQ.
                 */
                QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
                          io_req->xid);
                io_req->event = QEDF_IOREQ_EV_ELS_TMO;
                /* Call callback function to complete command */
                if (io_req->cb_func && io_req->cb_arg) {
                        op = io_req->cb_arg->op;
                        io_req->cb_func(io_req->cb_arg);
                        io_req->cb_arg = NULL;
                }
                qedf_initiate_cleanup(io_req, true);
                kref_put(&io_req->refcount, qedf_release_cmd);
                break;
        case QEDF_SEQ_CLEANUP:
                QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
                    "xid=0x%x.\n", io_req->xid);
                qedf_initiate_cleanup(io_req, true);
                io_req->event = QEDF_IOREQ_EV_ELS_TMO;
                qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
                break;
        default:
                break;
        }
}

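/*
 * Tear down the command manager: free the per-command DMA sense buffers,
 * task/SGL parameters and BD tables, and cancel any pending RRQ work.
 */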
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
        struct io_bdt *bdt_info;
        struct qedf_ctx *qedf = cmgr->qedf;
        size_t bd_tbl_sz;
        u16 min_xid = QEDF_MIN_XID;
        u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
        int num_ios;
        int i;
        struct qedf_ioreq *io_req;

        num_ios = max_xid - min_xid + 1;

        /* Free fcoe_bdt_ctx structures */
        if (!cmgr->io_bdt_pool)
                goto free_cmd_pool;

        bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                if (bdt_info->bd_tbl) {
                        dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
                            bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
                        bdt_info->bd_tbl = NULL;
                }
        }

        /* Destroy io_bdt pool */
        for (i = 0; i < num_ios; i++) {
                kfree(cmgr->io_bdt_pool[i]);
                cmgr->io_bdt_pool[i] = NULL;
        }

        kfree(cmgr->io_bdt_pool);
        cmgr->io_bdt_pool = NULL;

free_cmd_pool:

        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
                kfree(io_req->sgl_task_params);
                kfree(io_req->task_params);
                /* Make sure we free per command sense buffer */
                if (io_req->sense_buffer)
                        dma_free_coherent(&qedf->pdev->dev,
                            QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
                            io_req->sense_buffer_dma);
                cancel_delayed_work_sync(&io_req->rrq_work);
        }

        /* Free command manager itself */
        vfree(cmgr);
}

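/* Deferred work to send an RRQ ELS for an exchange after its ABTS completed */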
static void qedf_handle_rrq(struct work_struct *work)
{
        struct qedf_ioreq *io_req =
            container_of(work, struct qedf_ioreq, rrq_work.work);

        qedf_send_rrq(io_req);
}

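/*
 * Allocate the command manager and the per-xid resources each qedf_ioreq
 * needs up front: sense buffer, task parameters, SGL parameters and BD
 * tables.
 */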
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
        struct qedf_cmd_mgr *cmgr;
        struct io_bdt *bdt_info;
        struct qedf_ioreq *io_req;
        u16 xid;
        int i;
        int num_ios;
        u16 min_xid = QEDF_MIN_XID;
        u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

        /* Make sure num_queues is already set before calling this function */
        if (!qedf->num_queues) {
                QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
                return NULL;
        }

        if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
                QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
                           "max_xid 0x%x.\n", min_xid, max_xid);
                return NULL;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
                   "0x%x.\n", min_xid, max_xid);

        num_ios = max_xid - min_xid + 1;

        cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
        if (!cmgr) {
                QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
                return NULL;
        }

        cmgr->qedf = qedf;
        spin_lock_init(&cmgr->lock);

        /*
         * Initialize I/O request fields.
         */
        xid = QEDF_MIN_XID;

        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
                INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

                io_req->xid = xid++;

                INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

                /* Allocate DMA memory to hold sense buffer */
                io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
                    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
                    GFP_KERNEL);
                if (!io_req->sense_buffer)
                        goto mem_err;

                /* Allocate task parameters to pass to f/w init functions */
                io_req->task_params = kzalloc(sizeof(*io_req->task_params),
                                              GFP_KERNEL);
                if (!io_req->task_params) {
                        QEDF_ERR(&(qedf->dbg_ctx),
                                 "Failed to allocate task_params for xid=0x%x\n",
                                 i);
                        goto mem_err;
                }

                /*
                 * Allocate scatter/gather list info to pass to f/w init
                 * functions.
                 */
                io_req->sgl_task_params = kzalloc(
                    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
                if (!io_req->sgl_task_params) {
                        QEDF_ERR(&(qedf->dbg_ctx),
                                 "Failed to allocate sgl_task_params for xid=0x%x\n",
                                 i);
                        goto mem_err;
                }
        }

        /* Allocate pool of io_bdts - one for each qedf_ioreq */
        cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
            GFP_KERNEL);
        if (!cmgr->io_bdt_pool) {
                QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
                goto mem_err;
        }

        for (i = 0; i < num_ios; i++) {
                cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
                    GFP_KERNEL);
                if (!cmgr->io_bdt_pool[i]) {
                        QEDF_WARN(&(qedf->dbg_ctx),
                                  "Failed to alloc io_bdt_pool[%d].\n", i);
                        goto mem_err;
                }
        }

        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
                    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
                    &bdt_info->bd_tbl_dma, GFP_KERNEL);
                if (!bdt_info->bd_tbl) {
                        QEDF_WARN(&(qedf->dbg_ctx),
                                  "Failed to alloc bdt_tbl[%d].\n", i);
                        goto mem_err;
                }
        }
        atomic_set(&cmgr->free_list_cnt, num_ios);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
            "cmgr->free_list_cnt=%d.\n",
            atomic_read(&cmgr->free_list_cnt));

        return cmgr;

mem_err:
        qedf_cmd_mgr_free(cmgr);
        return NULL;
}

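/*
 * Claim a free qedf_ioreq from the command manager for this fcport,
 * enforcing the per-connection and global task limits.  Returns NULL if
 * nothing is available.
 */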
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
        struct qedf_ctx *qedf = fcport->qedf;
        struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
        struct qedf_ioreq *io_req = NULL;
        struct io_bdt *bd_tbl;
        u16 xid;
        uint32_t free_sqes;
        int i;
        unsigned long flags;

        free_sqes = atomic_read(&fcport->free_sqes);

        if (!free_sqes) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, free_sqes=%d.\n",
                    free_sqes);
                goto out_failed;
        }

        /* Limit the number of outstanding R/W tasks */
        if ((atomic_read(&fcport->num_active_ios) >=
            NUM_RW_TASKS_PER_CONNECTION)) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, num_active_ios=%d.\n",
                    atomic_read(&fcport->num_active_ios));
                goto out_failed;
        }

        /* Reserve some global TIDs for certain tasks */
        if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Returning NULL, free_list_cnt=%d.\n",
                    atomic_read(&cmd_mgr->free_list_cnt));
                goto out_failed;
        }

        spin_lock_irqsave(&cmd_mgr->lock, flags);
        for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
                io_req = &cmd_mgr->cmds[cmd_mgr->idx];
                cmd_mgr->idx++;
                if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
                        cmd_mgr->idx = 0;

                /* Check to make sure command was previously freed */
                if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
                        break;
        }

        if (i == FCOE_PARAMS_NUM_TASKS) {
                spin_unlock_irqrestore(&cmd_mgr->lock, flags);
                goto out_failed;
        }

        set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
        spin_unlock_irqrestore(&cmd_mgr->lock, flags);

        atomic_inc(&fcport->num_active_ios);
        atomic_dec(&fcport->free_sqes);
        xid = io_req->xid;
        atomic_dec(&cmd_mgr->free_list_cnt);

        io_req->cmd_mgr = cmd_mgr;
        io_req->fcport = fcport;

        /* Hold the io_req against deletion */
        kref_init(&io_req->refcount);

        /* Bind io_bdt for this io_req */
        /* Have a static link between io_req and io_bdt_pool */
        bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
        if (bd_tbl == NULL) {
                QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
                kref_put(&io_req->refcount, qedf_release_cmd);
                goto out_failed;
        }
        bd_tbl->io_req = io_req;
        io_req->cmd_type = cmd_type;
        io_req->tm_flags = 0;

        /* Reset sequence offset data */
        io_req->rx_buf_off = 0;
        io_req->tx_buf_off = 0;
        io_req->rx_id = 0xffff; /* No RX_ID */

        return io_req;

out_failed:
        /* Record failure for stats and return NULL to caller */
        qedf->alloc_failures++;
        return NULL;
}

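/* Free the DMA buffers and BDs backing a middle path (ELS/TMF) request */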
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        uint64_t sz = sizeof(struct scsi_sge);

        if (mp_req->mp_req_bd) {
                dma_free_coherent(&qedf->pdev->dev, sz,
                    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
                mp_req->mp_req_bd = NULL;
        }
        if (mp_req->mp_resp_bd) {
                dma_free_coherent(&qedf->pdev->dev, sz,
                    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
                mp_req->mp_resp_bd = NULL;
        }
        if (mp_req->req_buf) {
                dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
                    mp_req->req_buf, mp_req->req_buf_dma);
                mp_req->req_buf = NULL;
        }
        if (mp_req->resp_buf) {
                dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
                    mp_req->resp_buf, mp_req->resp_buf_dma);
                mp_req->resp_buf = NULL;
        }
}

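/*
 * kref release callback: return the command to the free pool and update
 * the per-fcport and global accounting.
 */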
void qedf_release_cmd(struct kref *ref)
{
        struct qedf_ioreq *io_req =
            container_of(ref, struct qedf_ioreq, refcount);
        struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
        struct qedf_rport *fcport = io_req->fcport;

        if (io_req->cmd_type == QEDF_ELS ||
            io_req->cmd_type == QEDF_TASK_MGMT_CMD)
                qedf_free_mp_resc(io_req);

        atomic_inc(&cmd_mgr->free_list_cnt);
        atomic_dec(&fcport->num_active_ios);
        if (atomic_read(&fcport->num_active_ios) < 0)
                QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

        /* Increment task retry identifier now that the request is released */
        io_req->task_retry_identifier++;

        clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
}

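/*
 * Split a scatter/gather element larger than QEDF_BD_SPLIT_SZ into multiple
 * buffer descriptors.  Returns the number of BDs consumed.
 */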
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
        int bd_index)
{
        struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int frag_size, sg_frags;

        sg_frags = 0;
        while (sg_len) {
                if (sg_len > QEDF_BD_SPLIT_SZ)
                        frag_size = QEDF_BD_SPLIT_SZ;
                else
                        frag_size = sg_len;
                bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
                bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
                bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;

                addr += (u64)frag_size;
                sg_frags++;
                sg_len -= frag_size;
        }
        return sg_frags;
}

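/*
 * DMA map the SCSI scatter/gather list and translate it into the firmware
 * BD table, flagging the slow path when elements are not page aligned.
 * Returns the number of BDs used.
 */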
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;
        struct Scsi_Host *host = sc->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        struct scatterlist *sg;
        int byte_count = 0;
        int sg_count = 0;
        int bd_count = 0;
        int sg_frags;
        unsigned int sg_len;
        u64 addr, end_addr;
        int i;

        sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
            scsi_sg_count(sc), sc->sc_data_direction);

        sg = scsi_sglist(sc);

        /*
         * New condition to send single SGE as cached-SGL with length less
         * than 64k.
         */
        if ((sg_count == 1) && (sg_dma_len(sg) <=
            QEDF_MAX_SGLEN_FOR_CACHESGL)) {
                sg_len = sg_dma_len(sg);
                addr = (u64)sg_dma_address(sg);

                bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
                bd[bd_count].sge_addr.hi = (addr >> 32);
                bd[bd_count].sge_len = (u16)sg_len;

                return ++bd_count;
        }

        scsi_for_each_sg(sc, sg, sg_count, i) {
                sg_len = sg_dma_len(sg);
                addr = (u64)sg_dma_address(sg);
                end_addr = (u64)(addr + sg_len);

                /*
                 * First s/g element in the list so check if the end_addr
                 * is page aligned. Also check to make sure the length is
                 * at least page size.
                 */
                if ((i == 0) && (sg_count > 1) &&
                    ((end_addr % QEDF_PAGE_SIZE) ||
                    sg_len < QEDF_PAGE_SIZE))
                        io_req->use_slowpath = true;
                /*
                 * Last s/g element so check if the start address is page
                 * aligned.
                 */
                else if ((i == (sg_count - 1)) && (sg_count > 1) &&
                    (addr % QEDF_PAGE_SIZE))
                        io_req->use_slowpath = true;
                /*
                 * Intermediate s/g element so check if the start and end
                 * addresses are page aligned.
                 */
                else if ((i != 0) && (i != (sg_count - 1)) &&
                    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
                        io_req->use_slowpath = true;

                if (sg_len > QEDF_MAX_BD_LEN) {
                        sg_frags = qedf_split_bd(io_req, addr, sg_len,
                            bd_count);
                } else {
                        sg_frags = 1;
                        bd[bd_count].sge_addr.lo = U64_LO(addr);
                        bd[bd_count].sge_addr.hi = U64_HI(addr);
                        bd[bd_count].sge_len = (uint16_t)sg_len;
                }

                bd_count += sg_frags;
                byte_count += sg_len;
        }

        if (byte_count != scsi_bufflen(sc))
                QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
                          "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
                           scsi_bufflen(sc), io_req->xid);

        return bd_count;
}

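/* Build the BD list for a command; commands with no data get one zeroed BD */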
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;
        struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int bd_count;

        if (scsi_sg_count(sc)) {
                bd_count = qedf_map_sg(io_req);
                if (bd_count == 0)
                        return -ENOMEM;
        } else {
                bd_count = 0;
                bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
                bd[0].sge_len = 0;
        }
        io_req->bd_tbl->bd_valid = bd_count;

        return 0;
}

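/*
 * Populate the 32-byte FCP_CMND IU (LUN, task attributes, flags, CDB and
 * data length) from the SCSI command and io_req state.
 */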
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
                                  struct fcp_cmnd *fcp_cmnd)
{
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

        /* fcp_cmnd is 32 bytes */
        memset(fcp_cmnd, 0, FCP_CMND_LEN);

        /* 8 bytes: SCSI LUN info */
        int_to_scsilun(sc_cmd->device->lun,
                        (struct scsi_lun *)&fcp_cmnd->fc_lun);

        /* 4 bytes: flag info */
        fcp_cmnd->fc_pri_ta = 0;
        fcp_cmnd->fc_tm_flags = io_req->tm_flags;
        fcp_cmnd->fc_flags = io_req->io_req_flags;
        fcp_cmnd->fc_cmdref = 0;

        /* Populate data direction */
        if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
        } else {
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
                        fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
                else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
                        fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
        }

        fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

        /* 16 bytes: CDB information */
        if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
                memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

        /* 4 bytes: FCP data length */
        fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}

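/*
 * Initialize the firmware task context and SQE for a SCSI read/write task:
 * fill in the task and SGL parameters, byte swap the FCP_CMND IU to FC
 * big endian order, and record which SGE path (single/fast/slow) was used.
 */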
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
        struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
        struct fcoe_wqe *sqe)
{
        enum fcoe_task_type task_type;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct io_bdt *bd_tbl = io_req->bd_tbl;
        u8 fcp_cmnd[32];
        u32 tmp_fcp_cmnd[8];
        int bd_count = 0;
        struct qedf_ctx *qedf = fcport->qedf;
        uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
        struct regpair sense_data_buffer_phys_addr;
        u32 tx_io_size = 0;
        u32 rx_io_size = 0;
        int i, cnt;

        /* Note init_initiator_rw_fcoe_task memsets the task context */
        io_req->task = task_ctx;
        memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
        memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
        memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

        /* Set task type based on DMA direction of command */
        if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                task_type = FCOE_TASK_TYPE_READ_INITIATOR;
        } else {
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                        task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
                        tx_io_size = io_req->data_xfer_len;
                } else {
                        task_type = FCOE_TASK_TYPE_READ_INITIATOR;
                        rx_io_size = io_req->data_xfer_len;
                }
        }

        /* Setup the fields for fcoe_task_params */
        io_req->task_params->context = task_ctx;
        io_req->task_params->sqe = sqe;
        io_req->task_params->task_type = task_type;
        io_req->task_params->tx_io_size = tx_io_size;
        io_req->task_params->rx_io_size = rx_io_size;
        io_req->task_params->conn_cid = fcport->fw_cid;
        io_req->task_params->itid = io_req->xid;
        io_req->task_params->cq_rss_number = cq_idx;
        io_req->task_params->is_tape_device = fcport->dev_type;

        /* Fill in information for scatter/gather list */
        if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
                bd_count = bd_tbl->bd_valid;
                io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
                io_req->sgl_task_params->sgl_phys_addr.lo =
                        U64_LO(bd_tbl->bd_tbl_dma);
                io_req->sgl_task_params->sgl_phys_addr.hi =
                        U64_HI(bd_tbl->bd_tbl_dma);
                io_req->sgl_task_params->num_sges = bd_count;
                io_req->sgl_task_params->total_buffer_size =
                    scsi_bufflen(io_req->sc_cmd);
                io_req->sgl_task_params->small_mid_sge =
                        io_req->use_slowpath;
        }

        /* Fill in physical address of sense buffer */
        sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
        sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

        /* Fill FCP_CMND IU */
        qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

        /* Swap fcp_cmnd since FC is big endian */
        cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
        for (i = 0; i < cnt; i++)
                tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
        memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

        init_initiator_rw_fcoe_task(io_req->task_params,
                                    io_req->sgl_task_params,
                                    sense_data_buffer_phys_addr,
                                    io_req->task_retry_identifier, fcp_cmnd);

        /* Increment SGL type counters */
        if (bd_count == 1) {
                qedf->single_sge_ios++;
                io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
        } else if (io_req->use_slowpath) {
                qedf->slow_sge_ios++;
                io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
        } else {
                qedf->fast_sge_ios++;
                io_req->sge_type = QEDF_IOREQ_FAST_SGE;
        }
}

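/*
 * Initialize the firmware task context for a middle path (ELS/TMF) request,
 * wiring up the request/response buffers from qedf_mp_req and the FC header.
 */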
void qedf_init_mp_task(struct qedf_ioreq *io_req,
        struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_rport *fcport = io_req->fcport;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        struct fc_frame_header *fc_hdr;
        struct fcoe_tx_mid_path_params task_fc_hdr;
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                  "Initializing MP task for cmd_type=%d\n",
                  io_req->cmd_type);

        qedf->control_requests++;

        memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
        memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

        /* Setup the task from io_req for easy reference */
        io_req->task = task_ctx;

        /* Setup the fields for fcoe_task_params */
        io_req->task_params->context = task_ctx;
        io_req->task_params->sqe = sqe;
        io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
        io_req->task_params->tx_io_size = io_req->data_xfer_len;
        /* rx_io_size tells the f/w how large a response buffer we have */
        io_req->task_params->rx_io_size = PAGE_SIZE;
        io_req->task_params->conn_cid = fcport->fw_cid;
        io_req->task_params->itid = io_req->xid;
        /* Return middle path commands on CQ 0 */
        io_req->task_params->cq_rss_number = 0;
        io_req->task_params->is_tape_device = fcport->dev_type;

        fc_hdr = &(mp_req->req_fc_hdr);
        /* Set OX_ID and RX_ID based on driver task id */
        fc_hdr->fh_ox_id = io_req->xid;
        fc_hdr->fh_rx_id = htons(0xffff);

        /* Set up FC header information */
        task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
        task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
        task_fc_hdr.type = fc_hdr->fh_type;
        task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
        task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
        task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
        task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

        /* Set up s/g list parameters for request buffer */
        tx_sgl_task_params.sgl = mp_req->mp_req_bd;
        tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
        tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
        tx_sgl_task_params.num_sges = 1;
        /* Transmit buffer is sized to the request payload */
        tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
        tx_sgl_task_params.small_mid_sge = 0;

        /* Set up s/g list parameters for response buffer */
        rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
        rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
        rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
        rx_sgl_task_params.num_sges = 1;
        /* Response buffer is a single page */
        rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
        rx_sgl_task_params.small_mid_sge = 0;

        /*
         * Last arg is 0 as previous code did not set that we wanted the
         * fc header information.
         */
        init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
                                                     &task_fc_hdr,
                                                     &tx_sgl_task_params,
                                                     &rx_sgl_task_params, 0);

        /* Midpath requests always consume 1 SGE */
        qedf->single_sge_ios++;
}

/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
        uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
        u16 rval;

        rval = fcport->sq_prod_idx;

        /* Adjust ring index */
        fcport->sq_prod_idx++;
        fcport->fw_sq_prod_idx++;
        if (fcport->sq_prod_idx == total_sqe)
                fcport->sq_prod_idx = 0;

        return rval;
}

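/* Ring the connection doorbell to hand new SQEs on this fcport to the f/w */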
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
        struct fcoe_db_data dbell = { 0 };

        dbell.agg_flags = 0;

        dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
        dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
        dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
            FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

        dbell.sq_prod = fcport->fw_sq_prod_idx;
        writel(*(u32 *)&dbell, fcport->p_doorbell);
        /* Make sure SQ index is updated so f/w processes requests in order */
        wmb();
        mmiowb();
}

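/*
 * Record an entry in the circular I/O trace buffer for debugging; direction
 * distinguishes request submission from response completion.
 */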
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
                          int8_t direction)
{
        struct qedf_ctx *qedf = fcport->qedf;
        struct qedf_io_log *io_log;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        unsigned long flags;
        uint8_t op;

        spin_lock_irqsave(&qedf->io_trace_lock, flags);

        io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
        io_log->direction = direction;
        io_log->task_id = io_req->xid;
        io_log->port_id = fcport->rdata->ids.port_id;
        io_log->lun = sc_cmd->device->lun;
        io_log->op = op = sc_cmd->cmnd[0];
        io_log->lba[0] = sc_cmd->cmnd[2];
        io_log->lba[1] = sc_cmd->cmnd[3];
        io_log->lba[2] = sc_cmd->cmnd[4];
        io_log->lba[3] = sc_cmd->cmnd[5];
        io_log->bufflen = scsi_bufflen(sc_cmd);
        io_log->sg_count = scsi_sg_count(sc_cmd);
        io_log->result = sc_cmd->result;
        io_log->jiffies = jiffies;
        io_log->refcount = kref_read(&io_req->refcount);

        if (direction == QEDF_IO_TRACE_REQ) {
                /* For requests we only care about the submission CPU */
                io_log->req_cpu = io_req->cpu;
                io_log->int_cpu = 0;
                io_log->rsp_cpu = 0;
        } else if (direction == QEDF_IO_TRACE_RSP) {
                io_log->req_cpu = io_req->cpu;
                io_log->int_cpu = io_req->int_cpu;
                io_log->rsp_cpu = smp_processor_id();
        }

        io_log->sge_type = io_req->sge_type;

        qedf->io_trace_idx++;
        if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
                qedf->io_trace_idx = 0;

        spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}

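/*
 * Build the BD list and task context for a SCSI command and post it to the
 * fcport's send queue.  Called with fcport->rport_lock held.
 */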
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct Scsi_Host *host = sc_cmd->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct e4_fcoe_task_context *task_ctx;
        u16 xid;
        enum fcoe_task_type req_type = 0;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        /* Initialize rest of io_req fields */
        io_req->data_xfer_len = scsi_bufflen(sc_cmd);
        sc_cmd->SCp.ptr = (char *)io_req;
        io_req->use_slowpath = false; /* Assume fast SGL by default */

        /* Record which cpu this request is associated with */
        io_req->cpu = smp_processor_id();

        if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
                req_type = FCOE_TASK_TYPE_READ_INITIATOR;
                io_req->io_req_flags = QEDF_READ;
                qedf->input_requests++;
        } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
                io_req->io_req_flags = QEDF_WRITE;
                qedf->output_requests++;
        } else {
                io_req->io_req_flags = 0;
                qedf->control_requests++;
        }

        xid = io_req->xid;

        /* Build buffer descriptor list for firmware from sg list */
        if (qedf_build_bd_list_from_sg(io_req)) {
                QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EAGAIN;
        }

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
                kref_put(&io_req->refcount, qedf_release_cmd);
                /* Must not post to the SQ of a session that is not ready */
                return -EINVAL;
        }

        /* Obtain free SQE */
        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));

        /* Get the task context */
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        if (!task_ctx) {
                QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
                           xid);
                kref_put(&io_req->refcount, qedf_release_cmd);
                return -EINVAL;
        }

        qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

        /* Ring doorbell */
        qedf_ring_doorbell(fcport);

        if (qedf_io_tracing && io_req->sc_cmd)
                qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

        return 0;
}

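/*
 * SCSI midlayer queuecommand entry point: validate link, rport and session
 * state, allocate a qedf_ioreq and post it.  Busy return codes ask the
 * midlayer to retry the command later.
 */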
int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
        struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct qedf_rport *fcport;
        struct qedf_ioreq *io_req;
        int rc = 0;
        int rval;
        unsigned long flags = 0;

        if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
            test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
                sc_cmd->result = DID_NO_CONNECT << 16;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        if (!qedf->pdev->msix_enabled) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
                    sc_cmd);
                sc_cmd->result = DID_NO_CONNECT << 16;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        rval = fc_remote_port_chkready(rport);
        if (rval) {
                sc_cmd->result = rval;
                sc_cmd->scsi_done(sc_cmd);
                return 0;
        }

        /* Retry command if we are doing a qed drain operation */
        if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto exit_qcmd;
        }

        if (lport->state != LPORT_ST_READY ||
            atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto exit_qcmd;
        }

        /* rport and tgt are allocated together, so tgt should be non-NULL */
        fcport = (struct qedf_rport *)&rp[1];

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                /*
                 * Session is not offloaded yet. Let SCSI-ml retry
                 * the command.
                 */
                rc = SCSI_MLQUEUE_TARGET_BUSY;
                goto exit_qcmd;
        }
        if (fcport->retry_delay_timestamp) {
                if (time_after(jiffies, fcport->retry_delay_timestamp)) {
                        fcport->retry_delay_timestamp = 0;
                } else {
                        /* If retry_delay timer is active, flow off the ML */
                        rc = SCSI_MLQUEUE_TARGET_BUSY;
                        goto exit_qcmd;
                }
        }

        io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
        if (!io_req) {
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto exit_qcmd;
        }

        io_req->sc_cmd = sc_cmd;

        /* Take fcport->rport_lock for posting to fcport send queue */
        spin_lock_irqsave(&fcport->rport_lock, flags);
        if (qedf_post_io_req(fcport, io_req)) {
                QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
                /* Return SQE to pool */
                atomic_inc(&fcport->free_sqes);
                rc = SCSI_MLQUEUE_HOST_BUSY;
        }
        spin_unlock_irqrestore(&fcport->rport_lock, flags);

exit_qcmd:
        return rc;
}

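/*
 * Parse the FCP_RSP IU from a completion CQE into the io_req: residual,
 * SCSI status, response code and sense data.
 */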
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
                                 struct fcoe_cqe_rsp_info *fcp_rsp)
{
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        u8 rsp_flags = fcp_rsp->rsp_flags.flags;
        int fcp_sns_len = 0;
        int fcp_rsp_len = 0;
        uint8_t *rsp_info, *sense_data;

        io_req->fcp_status = FC_GOOD;
        io_req->fcp_resid = 0;
        if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
            FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
                io_req->fcp_resid = fcp_rsp->fcp_resid;

        io_req->scsi_comp_flags = rsp_flags;
        CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
            fcp_rsp->scsi_status_code;

        if (rsp_flags &
            FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
                fcp_rsp_len = fcp_rsp->fcp_rsp_len;

        if (rsp_flags &
            FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
                fcp_sns_len = fcp_rsp->fcp_sns_len;

        io_req->fcp_rsp_len = fcp_rsp_len;
        io_req->fcp_sns_len = fcp_sns_len;
        rsp_info = sense_data = io_req->sense_buffer;

        /* fetch fcp_rsp_code */
        if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
                /* Only for task management function */
                io_req->fcp_rsp_code = rsp_info[3];
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
                /* Adjust sense-data location. */
                sense_data += fcp_rsp_len;
        }

        if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                    "Truncating sense buffer\n");
                fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
        }

        /* The sense buffer can be NULL for TMF commands */
        if (sc_cmd->sense_buffer) {
                memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
                if (fcp_sns_len)
                        memcpy(sc_cmd->sense_buffer, sense_data,
                            fcp_sns_len);
        }
}

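/* Undo the DMA mapping performed by qedf_map_sg() */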
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
        struct scsi_cmnd *sc = io_req->sc_cmd;

        if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
                dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
                    scsi_sg_count(sc), sc->sc_data_direction);
                io_req->bd_tbl->bd_valid = 0;
        }
}

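/*
 * Normal I/O completion handler: translate the FCP response into a SCSI
 * result, handle firmware detected underruns and retry delay qualifiers,
 * then hand the command back to the midlayer.
 */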
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        u16 xid, rval;
        struct e4_fcoe_task_context *task_ctx;
        struct scsi_cmnd *sc_cmd;
        struct fcoe_cqe_rsp_info *fcp_rsp;
        struct qedf_rport *fcport;
        int refcount;
        u16 scope, qualifier = 0;
        u8 fw_residual_flag = 0;

        if (!io_req)
                return;
        if (!cqe)
                return;

        xid = io_req->xid;
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        sc_cmd = io_req->sc_cmd;
        fcp_rsp = &cqe->cqe_info.rsp_info;

        if (!sc_cmd) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
                return;
        }

        if (!sc_cmd->SCp.ptr) {
                QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
                    "another context.\n");
                return;
        }

        if (!sc_cmd->request) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
                    "sc_cmd=%p.\n", sc_cmd);
                return;
        }

        if (!sc_cmd->request->q) {
                QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
                   "is not valid, sc_cmd=%p.\n", sc_cmd);
                return;
        }

        fcport = io_req->fcport;

        qedf_parse_fcp_rsp(io_req, fcp_rsp);

        qedf_unmap_sg_list(qedf, io_req);

        /* Check for FCP transport error */
        if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
                    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
                    io_req->fcp_rsp_code);
                sc_cmd->result = DID_BUS_BUSY << 16;
                goto out;
        }

        fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
            FCOE_CQE_RSP_INFO_FW_UNDERRUN);
        if (fw_residual_flag) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
                    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
                    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
                    cqe->cqe_info.rsp_info.fw_residual);

                if (io_req->cdb_status == 0)
                        sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
                else
                        sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

                /* Abort the command since we did not get all the data */
                init_completion(&io_req->abts_done);
                rval = qedf_initiate_abts(io_req, true);
                if (rval) {
                        QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
                        sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
                }

                /*
                 * Set resid to the whole buffer length so we won't try to
                 * reuse any previously read data.
                 */
                scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
                goto out;
        }

        switch (io_req->fcp_status) {
        case FC_GOOD:
                if (io_req->cdb_status == 0) {
                        /* Good I/O completion */
                        sc_cmd->result = DID_OK << 16;
                } else {
                        refcount = kref_read(&io_req->refcount);
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
                            "%d:0:%d:%lld xid=0x%0x op=0x%02x "
                            "lba=%02x%02x%02x%02x cdb_status=%d "
                            "fcp_resid=0x%x refcount=%d.\n",
                            qedf->lport->host->host_no, sc_cmd->device->id,
                            sc_cmd->device->lun, io_req->xid,
                            sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
                            sc_cmd->cmnd[4], sc_cmd->cmnd[5],
                            io_req->cdb_status, io_req->fcp_resid,
                            refcount);
                        sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

                        if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
                            io_req->cdb_status == SAM_STAT_BUSY) {
                                /*
                                 * Check whether we need to set retry_delay at
                                 * all based on retry_delay module parameter
                                 * and the status qualifier.
                                 */

                                /* Upper 2 bits */
                                scope = fcp_rsp->retry_delay_timer & 0xC000;
                                /* Lower 14 bits */
                                qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

                                if (qedf_retry_delay &&
                                    scope > 0 && qualifier > 0 &&
                                    qualifier <= 0x3FEF) {
                                        /* Check we don't go over the max */
                                        if (qualifier > QEDF_RETRY_DELAY_MAX)
                                                qualifier =
                                                    QEDF_RETRY_DELAY_MAX;
                                        fcport->retry_delay_timestamp =
                                            jiffies + (qualifier * HZ / 10);
                                }
                                /* Record stats */
                                if (io_req->cdb_status ==
                                    SAM_STAT_TASK_SET_FULL)
                                        qedf->task_set_fulls++;
                                else
                                        qedf->busy++;
                        }
                }
                if (io_req->fcp_resid)
                        scsi_set_resid(sc_cmd, io_req->fcp_resid);
                break;
        default:
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
                           io_req->fcp_status);
                break;
        }

out:
        if (qedf_io_tracing)
                qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

        io_req->sc_cmd = NULL;
        sc_cmd->SCp.ptr = NULL;
        sc_cmd->scsi_done(sc_cmd);
        kref_put(&io_req->refcount, qedf_release_cmd);
}

/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        int result)
{
        u16 xid;
        struct scsi_cmnd *sc_cmd;
        int refcount;

        if (!io_req)
                return;

        xid = io_req->xid;
        sc_cmd = io_req->sc_cmd;

        if (!sc_cmd) {
                QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
                return;
        }

        if (!sc_cmd->SCp.ptr) {
                QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
                    "another context.\n");
                return;
        }

        qedf_unmap_sg_list(qedf, io_req);

        sc_cmd->result = result << 16;
        refcount = kref_read(&io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
            "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
            "allowed=%d retries=%d refcount=%d.\n",
            qedf->lport->host->host_no, sc_cmd->device->id,
            sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
            sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
            sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
            refcount);

        /*
         * Set resid to the whole buffer length so we won't try to reuse any
         * previously read data.
         */
        scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

        if (qedf_io_tracing)
                qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

        io_req->sc_cmd = NULL;
        sc_cmd->SCp.ptr = NULL;
        sc_cmd->scsi_done(sc_cmd);
        kref_put(&io_req->refcount, qedf_release_cmd);
}

/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        int rval, i;
        struct qedf_rport *fcport = io_req->fcport;
        u64 err_warn_bit_map;
        u8 err_warn = 0xff;

        if (!cqe)
                return;

        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
                  "xid=0x%x\n", io_req->xid);
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
                  "err_warn_bitmap=%08x:%08x\n",
                  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
                  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
                  "rx_buff_off=%08x, rx_id=%04x\n",
                  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
                  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
                  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

        /*
         * Combine the two halves into one 64-bit bitmap and find the lowest
         * warning bit that is set.
         */
        err_warn_bit_map = (u64)
            ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
            (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
        for (i = 0; i < 64; i++) {
                if (err_warn_bit_map & (u64)((u64)1 << i)) {
                        err_warn = i;
                        break;
                }
        }

        /* Check if REC TOV expired if this is a tape device */
        if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
                if (err_warn ==
                    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
                        QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
                        if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
                                io_req->rx_buf_off =
                                    cqe->cqe_info.err_info.rx_buf_off;
                                io_req->tx_buf_off =
                                    cqe->cqe_info.err_info.tx_buf_off;
                                io_req->rx_id = cqe->cqe_info.err_info.rx_id;
                                rval = qedf_send_rec(io_req);
                                /*
                                 * We only want to abort the io_req if we
                                 * can't queue the REC command as we want to
                                 * keep the exchange open for recovery.
                                 */
                                if (rval)
                                        goto send_abort;
                        }
                        return;
                }
        }

send_abort:
        init_completion(&io_req->abts_done);
        rval = qedf_initiate_abts(io_req, true);
        if (rval)
                QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
{
        int rval;

        if (!cqe)
                return;

        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
                  "xid=0x%x\n", io_req->xid);
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
                  "err_warn_bitmap=%08x:%08x\n",
                  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
                  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
        QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
                  "rx_buff_off=%08x, rx_id=%04x\n",
                  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
                  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
                  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

        if (qedf->stop_io_on_error) {
                qedf_stop_all_io(qedf);
                return;
        }

        init_completion(&io_req->abts_done);
        rval = qedf_initiate_abts(io_req, true);
        if (rval)
                QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

1406 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1407         struct qedf_ioreq *els_req)
1408 {
1409         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1410             "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1411             kref_read(&els_req->refcount));
1412
1413         /*
1414          * Need to distinguish this from a timeout when calling the
1415          * els_req->cb_func.
1416          */
1417         els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1418
1419         /* Cancel the timer */
1420         cancel_delayed_work_sync(&els_req->timeout_work);
1421
1422         /* Call callback function to complete command */
1423         if (els_req->cb_func && els_req->cb_arg) {
1424                 els_req->cb_func(els_req->cb_arg);
1425                 els_req->cb_arg = NULL;
1426         }
1427
1428         /* Release kref for original initiate_els */
1429         kref_put(&els_req->refcount, qedf_release_cmd);
1430 }
1431
1432 /* A value of -1 for lun is a wild card that means flush all
1433  * active SCSI I/Os for the target.
1434  */
1435 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1436 {
1437         struct qedf_ioreq *io_req;
1438         struct qedf_ctx *qedf;
1439         struct qedf_cmd_mgr *cmd_mgr;
1440         int i, rc;
1441
1442         if (!fcport)
1443                 return;
1444
1445         /* Check that fcport is still offloaded */
1446         if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1447                 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1448                 return;
1449         }
1450
1451         qedf = fcport->qedf;
1452         cmd_mgr = qedf->cmd_mgr;
1453
1454         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flushing active I/Os.\n");
1455
1456         for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1457                 io_req = &cmd_mgr->cmds[i];
1458
1459                 if (!io_req)
1460                         continue;
1461                 if (io_req->fcport != fcport)
1462                         continue;
1463                 if (io_req->cmd_type == QEDF_ELS) {
1464                         rc = kref_get_unless_zero(&io_req->refcount);
1465                         if (!rc) {
1466                                 QEDF_ERR(&(qedf->dbg_ctx),
1467                                     "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1468                                     io_req, io_req->xid);
1469                                 continue;
1470                         }
1471                         qedf_flush_els_req(qedf, io_req);
1472                         /*
1473                          * Release the kref and go back to the top of the
1474                          * loop.
1475                          */
1476                         goto free_cmd;
1477                 }
1478
1479                 if (io_req->cmd_type == QEDF_ABTS) {
1480                         rc = kref_get_unless_zero(&io_req->refcount);
1481                         if (!rc) {
1482                                 QEDF_ERR(&(qedf->dbg_ctx),
1483                                     "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1484                                     io_req, io_req->xid);
1485                                 continue;
1486                         }
1487                         QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1488                             "Flushing abort xid=0x%x.\n", io_req->xid);
1489
1490                         clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1491
1492                         if (io_req->sc_cmd) {
1493                                 if (io_req->return_scsi_cmd_on_abts)
1494                                         qedf_scsi_done(qedf, io_req, DID_ERROR);
1495                         }
1496
1497                         /* Notify eh_abort handler that ABTS is complete */
1498                         complete(&io_req->abts_done);
1499                         kref_put(&io_req->refcount, qedf_release_cmd);
1500
1501                         goto free_cmd;
1502                 }
1503
1504                 if (!io_req->sc_cmd)
1505                         continue;
1506                 if (lun > 0) {
1507                         if (io_req->sc_cmd->device->lun !=
1508                             (u64)lun)
1509                                 continue;
1510                 }
1511
1512                 /*
1513                  * Use kref_get_unless_zero in the unlikely case the command
1514                  * we're about to flush was completed in the normal SCSI path
1515                  */
1516                 rc = kref_get_unless_zero(&io_req->refcount);
1517                 if (!rc) {
1518                         QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1519                             "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1520                         continue;
1521                 }
1522                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1523                     "Cleanup xid=0x%x.\n", io_req->xid);
1524
1525         /* Clean up the task and return the I/O to the mid-layer */
1526                 qedf_initiate_cleanup(io_req, true);
1527
1528 free_cmd:
1529                 kref_put(&io_req->refcount, qedf_release_cmd);
1530         }
1531 }
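
/*
 * Minimal usage sketch, mirroring qedf_execute_tmf() below: flush just the
 * I/Os for one LUN after a LUN reset, or pass -1 to flush every active I/O
 * for the target:
 *
 *      qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
 *      qedf_flush_active_ios(fcport, -1);
 */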
1532
1533 /*
1534  * Initiate an ABTS middle path command. Note that we don't have to initialize
1535  * the task context for an ABTS task.
1536  */
1537 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1538 {
1539         struct fc_lport *lport;
1540         struct qedf_rport *fcport = io_req->fcport;
1541         struct fc_rport_priv *rdata;
1542         struct qedf_ctx *qedf;
1543         u16 xid;
1544         u32 r_a_tov = 0;
1545         int rc = 0;
1546         unsigned long flags;
1547         struct fcoe_wqe *sqe;
1548         u16 sqe_idx;
1549
1550         /* Sanity check qedf_rport before dereferencing any pointers */
1551         if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1552                 QEDF_ERR(NULL, "tgt not offloaded\n");
1553                 rc = 1;
1554                 goto abts_err;
1555         }
1556
1557         rdata = fcport->rdata;
1558         r_a_tov = rdata->r_a_tov;
1559         qedf = fcport->qedf;
1560         lport = qedf->lport;
1561
1562         if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1563                 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1564                 rc = 1;
1565                 goto abts_err;
1566         }
1567
1568         if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1569                 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1570                 rc = 1;
1571                 goto abts_err;
1572         }
1573
1574         /* Ensure room on SQ */
1575         if (!atomic_read(&fcport->free_sqes)) {
1576                 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1577                 rc = 1;
1578                 goto abts_err;
1579         }
1580
1581         if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1582                 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1583                 rc = 1;
1584                 goto out;
1585         }
1586
1587         if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1588             test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1589             test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1590                 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1591                           "cleanup or abort processing or already "
1592                           "completed.\n", io_req->xid);
1593                 rc = 1;
1594                 goto out;
1595         }
1596
1597         kref_get(&io_req->refcount);
1598
1599         xid = io_req->xid;
1600         qedf->control_requests++;
1601         qedf->packet_aborts++;
1602
1603         /* Set the return CPU to be the same as the request one */
1604         io_req->cpu = smp_processor_id();
1605
1606         /* Set the command type to abort */
1607         io_req->cmd_type = QEDF_ABTS;
1608         io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1609
1610         set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1611         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
1612                    "0x%x\n", xid);
1613
1614         qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
1615
1616         spin_lock_irqsave(&fcport->rport_lock, flags);
1617
1618         sqe_idx = qedf_get_sqe_idx(fcport);
1619         sqe = &fcport->sq[sqe_idx];
1620         memset(sqe, 0, sizeof(struct fcoe_wqe));
1621         io_req->task_params->sqe = sqe;
1622
1623         init_initiator_abort_fcoe_task(io_req->task_params);
1624         qedf_ring_doorbell(fcport);
1625
1626         spin_unlock_irqrestore(&fcport->rport_lock, flags);
1627
1628         return rc;
1629 abts_err:
1630         /*
1631          * If the ABTS task fails to queue then we need to clean up the
1632          * task at the firmware.
1633          */
1634         qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
1635 out:
1636         return rc;
1637 }
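
/*
 * Minimal caller sketch, mirroring qedf_process_error_detect() above:
 * abts_done must be initialized before the ABTS is queued so that either
 * qedf_process_abts_compl() or the flush path has something to signal:
 *
 *      init_completion(&io_req->abts_done);
 *      if (qedf_initiate_abts(io_req, true))
 *              QEDF_ERR(&qedf->dbg_ctx, "Failed to queue ABTS.\n");
 */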
1638
1639 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1640         struct qedf_ioreq *io_req)
1641 {
1642         uint32_t r_ctl;
1643         uint16_t xid;
1644
1645         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1646                    "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1647
1648         cancel_delayed_work(&io_req->timeout_work);
1649
1650         xid = io_req->xid;
1651         r_ctl = cqe->cqe_info.abts_info.r_ctl;
1652
1653         switch (r_ctl) {
1654         case FC_RCTL_BA_ACC:
1655                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1656                     "ABTS response - ACC Send RRQ after R_A_TOV\n");
1657                 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1658                 /*
1659                  * Don't release this cmd yet. It will be released
1660                  * after we get the RRQ response
1661                  */
1662                 kref_get(&io_req->refcount);
1663                 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1664                     msecs_to_jiffies(qedf->lport->r_a_tov));
1665                 break;
1666         /* For error cases let the cleanup return the command */
1667         case FC_RCTL_BA_RJT:
1668                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1669                    "ABTS response - RJT\n");
1670                 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1671                 break;
1672         default:
1673                 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1674                 break;
1675         }
1676
1677         clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1678
1679         if (io_req->sc_cmd) {
1680                 if (io_req->return_scsi_cmd_on_abts)
1681                         qedf_scsi_done(qedf, io_req, DID_ERROR);
1682         }
1683
1684         /* Notify eh_abort handler that ABTS is complete */
1685         complete(&io_req->abts_done);
1686
1687         kref_put(&io_req->refcount, qedf_release_cmd);
1688 }
1689
1690 int qedf_init_mp_req(struct qedf_ioreq *io_req)
1691 {
1692         struct qedf_mp_req *mp_req;
1693         struct scsi_sge *mp_req_bd;
1694         struct scsi_sge *mp_resp_bd;
1695         struct qedf_ctx *qedf = io_req->fcport->qedf;
1696         dma_addr_t addr;
1697         uint64_t sz;
1698
1699         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
1700
1701         mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
1702         memset(mp_req, 0, sizeof(struct qedf_mp_req));
1703
1704         if (io_req->cmd_type != QEDF_ELS) {
1705                 mp_req->req_len = sizeof(struct fcp_cmnd);
1706                 io_req->data_xfer_len = mp_req->req_len;
1707         } else {
1708                 mp_req->req_len = io_req->data_xfer_len;
1709         }
1710         mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
1711             &mp_req->req_buf_dma, GFP_KERNEL);
1712         if (!mp_req->req_buf) {
1713                 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
1714                 qedf_free_mp_resc(io_req);
1715                 return -ENOMEM;
1716         }
1717
1718         mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
1719             QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
1720         if (!mp_req->resp_buf) {
1721                 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp "
1722                           "buffer\n");
1723                 qedf_free_mp_resc(io_req);
1724                 return -ENOMEM;
1725         }
1726
1727         /* Allocate and map mp_req_bd and mp_resp_bd */
1728         sz = sizeof(struct scsi_sge);
1729         mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1730             &mp_req->mp_req_bd_dma, GFP_KERNEL);
1731         if (!mp_req->mp_req_bd) {
1732                 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
1733                 qedf_free_mp_resc(io_req);
1734                 return -ENOMEM;
1735         }
1736
1737         mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1738             &mp_req->mp_resp_bd_dma, GFP_KERNEL);
1739         if (!mp_req->mp_resp_bd) {
1740                 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
1741                 qedf_free_mp_resc(io_req);
1742                 return -ENOMEM;
1743         }
1744
1745         /* Fill bd table */
1746         addr = mp_req->req_buf_dma;
1747         mp_req_bd = mp_req->mp_req_bd;
1748         mp_req_bd->sge_addr.lo = U64_LO(addr);
1749         mp_req_bd->sge_addr.hi = U64_HI(addr);
1750         mp_req_bd->sge_len = QEDF_PAGE_SIZE;
1751
1752         /*
1753          * MP buffer is either a task mgmt command or an ELS.
1754          * So the assumption is that it consumes a single bd
1755          * entry in the bd table
1756          */
1757         mp_resp_bd = mp_req->mp_resp_bd;
1758         addr = mp_req->resp_buf_dma;
1759         mp_resp_bd->sge_addr.lo = U64_LO(addr);
1760         mp_resp_bd->sge_addr.hi = U64_HI(addr);
1761         mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
1762
1763         return 0;
1764 }
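
/*
 * U64_LO()/U64_HI() split the 64-bit DMA address into the two 32-bit
 * sge_addr words. A sketch of the equivalent form using the generic kernel
 * helpers, assuming the usual qed definitions of these macros:
 *
 *      mp_req_bd->sge_addr.lo = lower_32_bits(addr);
 *      mp_req_bd->sge_addr.hi = upper_32_bits(addr);
 */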
1765
1766 /*
1767  * Last-ditch effort to clear the port if it's stuck. Used only after a
1768  * cleanup task times out.
1769  */
1770 static void qedf_drain_request(struct qedf_ctx *qedf)
1771 {
1772         if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1773                 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
1774                 return;
1775         }
1776
1777         /* Set bit to return all queuecommand requests as busy */
1778         set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1779
1780         /* Issue the qed drain request for this function; it should be synchronous */
1781         qed_ops->common->drain(qedf->cdev);
1782
1783         /* Settle time for CQEs to be returned */
1784         msleep(100);
1785
1786         /* Unplug and continue */
1787         clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1788 }
1789
1790 /*
1791  * Returns SUCCESS if the cleanup task does not timeout, otherwise return
1792  * FAILURE.
1793  */
1794 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1795         bool return_scsi_cmd_on_abts)
1796 {
1797         struct qedf_rport *fcport;
1798         struct qedf_ctx *qedf;
1799         uint16_t xid;
1800         struct e4_fcoe_task_context *task;
1801         int tmo = 0;
1802         int rc = SUCCESS;
1803         unsigned long flags;
1804         struct fcoe_wqe *sqe;
1805         u16 sqe_idx;
1806
1807         fcport = io_req->fcport;
1808         if (!fcport) {
1809                 QEDF_ERR(NULL, "fcport is NULL.\n");
1810                 return SUCCESS;
1811         }
1812
1813         /* Sanity check qedf_rport before dereferencing any pointers */
1814         if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1815                 QEDF_ERR(NULL, "tgt not offloaded\n");
1816                 rc = 1;
1817                 return SUCCESS;
1818         }
1819
1820         qedf = fcport->qedf;
1821         if (!qedf) {
1822                 QEDF_ERR(NULL, "qedf is NULL.\n");
1823                 return SUCCESS;
1824         }
1825
1826         if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1827             test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
1828                 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1829                           "cleanup processing or already completed.\n",
1830                           io_req->xid);
1831                 return SUCCESS;
1832         }
1833
1834         /* Ensure room on SQ */
1835         if (!atomic_read(&fcport->free_sqes)) {
1836                 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1837                 return FAILED;
1838         }
1839
1840
1841         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
1842             io_req->xid);
1843
1844         /* Cleanup cmds re-use the same TID as the original I/O */
1845         xid = io_req->xid;
1846         io_req->cmd_type = QEDF_CLEANUP;
1847         io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1848
1849         /* Set the return CPU to be the same as the request one */
1850         io_req->cpu = smp_processor_id();
1851
1852         set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1853
1854         task = qedf_get_task_mem(&qedf->tasks, xid);
1855
1856         init_completion(&io_req->tm_done);
1857
1858         spin_lock_irqsave(&fcport->rport_lock, flags);
1859
1860         sqe_idx = qedf_get_sqe_idx(fcport);
1861         sqe = &fcport->sq[sqe_idx];
1862         memset(sqe, 0, sizeof(struct fcoe_wqe));
1863         io_req->task_params->sqe = sqe;
1864
1865         init_initiator_cleanup_fcoe_task(io_req->task_params);
1866         qedf_ring_doorbell(fcport);
1867
1868         spin_unlock_irqrestore(&fcport->rport_lock, flags);
1869
1870         tmo = wait_for_completion_timeout(&io_req->tm_done,
1871             QEDF_CLEANUP_TIMEOUT * HZ);
1872
1873         if (!tmo) {
1874                 rc = FAILED;
1875                 /* Timeout case */
1876                 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
1877                           "xid=%x.\n", io_req->xid);
1878                 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1879                 /* Issue a drain request if cleanup task times out */
1880                 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
1881                 qedf_drain_request(qedf);
1882         }
1883
1884         if (io_req->sc_cmd) {
1885                 if (io_req->return_scsi_cmd_on_abts)
1886                         qedf_scsi_done(qedf, io_req, DID_ERROR);
1887         }
1888
1889         if (rc == SUCCESS)
1890                 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
1891         else
1892                 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
1893
1894         return rc;
1895 }
1896
1897 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1898         struct qedf_ioreq *io_req)
1899 {
1900         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
1901                    io_req->xid);
1902
1903         clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1904
1905         /* Complete so we can finish cleaning up the I/O */
1906         complete(&io_req->tm_done);
1907 }
1908
1909 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
1910         uint8_t tm_flags)
1911 {
1912         struct qedf_ioreq *io_req;
1913         struct e4_fcoe_task_context *task;
1914         struct qedf_ctx *qedf = fcport->qedf;
1915         struct fc_lport *lport = qedf->lport;
1916         int rc = 0;
1917         uint16_t xid;
1918         int tmo = 0;
1919         unsigned long flags;
1920         struct fcoe_wqe *sqe;
1921         u16 sqe_idx;
1922
1923         if (!sc_cmd) {
1924                 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
1925                 return FAILED;
1926         }
1927
1928         if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1929                 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
1930                 rc = FAILED;
1931                 return FAILED;
1932         }
1933
1934         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
1935                    "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
1936
1937         io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
1938         if (!io_req) {
1939                 QEDF_ERR(&(qedf->dbg_ctx), "Failed to alloc TMF io_req\n");
1940                 rc = -EAGAIN;
1941                 goto reset_tmf_err;
1942         }
1943
1944         if (tm_flags == FCP_TMF_LUN_RESET)
1945                 qedf->lun_resets++;
1946         else if (tm_flags == FCP_TMF_TGT_RESET)
1947                 qedf->target_resets++;
1948
1949         /* Initialize rest of io_req fields */
1950         io_req->sc_cmd = sc_cmd;
1951         io_req->fcport = fcport;
1952         io_req->cmd_type = QEDF_TASK_MGMT_CMD;
1953
1954         /* Set the return CPU to be the same as the request one */
1955         io_req->cpu = smp_processor_id();
1956
1957         /* Set TM flags */
1958         io_req->io_req_flags = QEDF_READ;
1959         io_req->data_xfer_len = 0;
1960         io_req->tm_flags = tm_flags;
1961
1962         /* Default is to return a SCSI command when an error occurs */
1963         io_req->return_scsi_cmd_on_abts = true;
1964
1965         /* Obtain exchange id */
1966         xid = io_req->xid;
1967
1968         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
1969                    "0x%x\n", xid);
1970
1971         /* Initialize task context for this IO request */
1972         task = qedf_get_task_mem(&qedf->tasks, xid);
1973
1974         init_completion(&io_req->tm_done);
1975
1976         spin_lock_irqsave(&fcport->rport_lock, flags);
1977
1978         sqe_idx = qedf_get_sqe_idx(fcport);
1979         sqe = &fcport->sq[sqe_idx];
1980         memset(sqe, 0, sizeof(struct fcoe_wqe));
1981
1982         qedf_init_task(fcport, lport, io_req, task, sqe);
1983         qedf_ring_doorbell(fcport);
1984
1985         spin_unlock_irqrestore(&fcport->rport_lock, flags);
1986
1987         tmo = wait_for_completion_timeout(&io_req->tm_done,
1988             QEDF_TM_TIMEOUT * HZ);
1989
1990         if (!tmo) {
1991                 rc = FAILED;
1992                 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
1993         } else {
1994                 /* Check TMF response code */
1995                 if (io_req->fcp_rsp_code == 0)
1996                         rc = SUCCESS;
1997                 else
1998                         rc = FAILED;
1999         }
2000
2001         if (tm_flags == FCP_TMF_LUN_RESET)
2002                 qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
2003         else
2004                 qedf_flush_active_ios(fcport, -1);
2005
2006         kref_put(&io_req->refcount, qedf_release_cmd);
2007
2008         if (rc != SUCCESS) {
2009                 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2010                 rc = FAILED;
2011         } else {
2012                 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2013                 rc = SUCCESS;
2014         }
2015 reset_tmf_err:
2016         return rc;
2017 }
2018
2019 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2020 {
2021         struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2022         struct fc_rport_libfc_priv *rp = rport->dd_data;
2023         struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2024         struct qedf_ctx *qedf;
2025         struct fc_lport *lport;
2026         int rc = SUCCESS;
2027         int rval;
2028
2029         rval = fc_remote_port_chkready(rport);
2030
2031         if (rval) {
2032                 QEDF_ERR(NULL, "device_reset rport not ready\n");
2033                 rc = FAILED;
2034                 goto tmf_err;
2035         }
2036
2037         if (fcport == NULL) {
2038                 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2039                 rc = FAILED;
2040                 goto tmf_err;
2041         }
2042
2043         qedf = fcport->qedf;
2044         lport = qedf->lport;
2045
2046         if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2047             test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2048                 rc = SUCCESS;
2049                 goto tmf_err;
2050         }
2051
2052         if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2053                 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2054                 rc = FAILED;
2055                 goto tmf_err;
2056         }
2057
2058         rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2059
2060 tmf_err:
2061         return rc;
2062 }
2063
2064 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2065         struct qedf_ioreq *io_req)
2066 {
2067         struct fcoe_cqe_rsp_info *fcp_rsp;
2068
2069         fcp_rsp = &cqe->cqe_info.rsp_info;
2070         qedf_parse_fcp_rsp(io_req, fcp_rsp);
2071
2072         io_req->sc_cmd = NULL;
2073         complete(&io_req->tm_done);
2074 }
2075
2076 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2077         struct fcoe_cqe *cqe)
2078 {
2079         unsigned long flags;
2080         uint16_t tmp;
2081         uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2082         u32 payload_len, crc;
2083         struct fc_frame_header *fh;
2084         struct fc_frame *fp;
2085         struct qedf_io_work *io_work;
2086         u32 bdq_idx;
2087         void *bdq_addr;
2088         struct scsi_bd *p_bd_info;
2089
2090         p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2091         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2092                   "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2093                   le32_to_cpu(p_bd_info->address.hi),
2094                   le32_to_cpu(p_bd_info->address.lo),
2095                   le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2096                   le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2097                   qedf->bdq_prod_idx, pktlen);
2098
2099         bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2100         if (bdq_idx >= QEDF_BDQ_SIZE) {
2101                 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2102                     bdq_idx);
2103                 goto increment_prod;
2104         }
2105
2106         bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2107         if (!bdq_addr) {
2108                 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2109                     "unsolicited packet.\n");
2110                 goto increment_prod;
2111         }
2112
2113         if (qedf_dump_frames) {
2114                 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2115                     "BDQ frame is at addr=%p.\n", bdq_addr);
2116                 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2117                     (void *)bdq_addr, pktlen, false);
2118         }
2119
2120         /* Allocate frame */
2121         payload_len = pktlen - sizeof(struct fc_frame_header);
2122         fp = fc_frame_alloc(qedf->lport, payload_len);
2123         if (!fp) {
2124                 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2125                 goto increment_prod;
2126         }
2127
2128         /* Copy data from BDQ buffer into fc_frame struct */
2129         fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2130         memcpy(fh, (void *)bdq_addr, pktlen);
2131
2132         /* Initialize the frame so libfc sees it as a valid frame */
2133         crc = fcoe_fc_crc(fp);
2134         fc_frame_init(fp);
2135         fr_dev(fp) = qedf->lport;
2136         fr_sof(fp) = FC_SOF_I3;
2137         fr_eof(fp) = FC_EOF_T;
2138         fr_crc(fp) = cpu_to_le32(~crc);
2139
2140         /*
2141          * We need to hand the frame back up to libfc in a non-atomic
2142          * context
2143          */
2144         io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2145         if (!io_work) {
2146                 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2147                            "work for I/O completion.\n");
2148                 fc_frame_free(fp);
2149                 goto increment_prod;
2150         }
2151         memset(io_work, 0, sizeof(struct qedf_io_work));
2152
2153         INIT_WORK(&io_work->work, qedf_fp_io_handler);
2154
2155         /* Copy contents of CQE for deferred processing */
2156         memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2157
2158         io_work->qedf = qedf;
2159         io_work->fp = fp;
2160
2161         queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2162 increment_prod:
2163         spin_lock_irqsave(&qedf->hba_lock, flags);
2164
2165         /* Increment producer to let f/w know we've handled the frame */
2166         qedf->bdq_prod_idx++;
2167
2168         /* Producer index wraps at uint16_t boundary */
2169         if (qedf->bdq_prod_idx == 0xffff)
2170                 qedf->bdq_prod_idx = 0;
2171
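        /*
         * Each doorbell write below is followed by a readw() of the same
         * register; the read-back flushes the posted MMIO write (the value
         * returned is not otherwise used).
         */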
2172         writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2173         tmp = readw(qedf->bdq_primary_prod);
2174         writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2175         tmp = readw(qedf->bdq_secondary_prod);
2176
2177         spin_unlock_irqrestore(&qedf->hba_lock, flags);
2178 }
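
/*
 * A minimal sketch of the deferred side of the handoff above, assuming the
 * worker simply passes the reconstructed frame up to libfc in process
 * context; the real qedf_fp_io_handler() lives elsewhere in this driver:
 */
static void qedf_example_fp_io_handler(struct work_struct *work)
{
        struct qedf_io_work *io_work =
            container_of(work, struct qedf_io_work, work);

        if (io_work->fp)
                fc_exch_recv(io_work->qedf->lport, io_work->fp);

        /* Return the work item to the pool it was allocated from */
        mempool_free(io_work, io_work->qedf->io_mempool);
}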