2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016-2018 Cavium Inc.
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
9 #include <linux/spinlock.h>
10 #include <linux/vmalloc.h>
12 #include <scsi/scsi_tcq.h>
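/*
 * Arm the per-command timeout: timeout_work runs qedf_cmd_timeout() after
 * timer_msec milliseconds unless it is cancelled first (for example, the
 * ABTS path below arms this just before ringing the doorbell).
 */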
14 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
15 unsigned int timer_msec)
17 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
18 msecs_to_jiffies(timer_msec));
21 static void qedf_cmd_timeout(struct work_struct *work)
24 struct qedf_ioreq *io_req =
25 container_of(work, struct qedf_ioreq, timeout_work.work);
26 struct qedf_ctx *qedf;
27 struct qedf_rport *fcport;
31 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
35 fcport = io_req->fcport;
36 if (io_req->fcport == NULL) {
37 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
43 switch (io_req->cmd_type) {
46 QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
51 QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
53 /* Cleanup timed out ABTS */
54 qedf_initiate_cleanup(io_req, true);
55 complete(&io_req->abts_done);
58 * Need to call kref_put for reference taken when initiate_abts
59 * was called since abts_compl won't be called now that we've
60 * cleaned up the task.
62 kref_put(&io_req->refcount, qedf_release_cmd);
65 * Now that the original I/O and the ABTS are complete, see
66 * if we need to reconnect to the target.
68 qedf_restart_rport(fcport);
71 kref_get(&io_req->refcount);
73 * Don't attempt to clean an ELS timeout as any subsequent
74 * ABTS or cleanup requests just hang. For now just free
75 * the resources of the original I/O and the RRQ
77 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
79 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
80 /* Call callback function to complete command */
81 if (io_req->cb_func && io_req->cb_arg) {
82 op = io_req->cb_arg->op;
83 io_req->cb_func(io_req->cb_arg);
84 io_req->cb_arg = NULL;
86 qedf_initiate_cleanup(io_req, true);
87 kref_put(&io_req->refcount, qedf_release_cmd);
89 case QEDF_SEQ_CLEANUP:
90 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
91 "xid=0x%x.\n", io_req->xid);
92 qedf_initiate_cleanup(io_req, true);
93 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
94 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
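/*
 * Tear down the command manager: free each command's DMA BD table and
 * sense buffer, cancel pending RRQ work, then release the io_bdt pool and
 * the manager itself. Counterpart of qedf_cmd_mgr_alloc().
 */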
101 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
103 struct io_bdt *bdt_info;
104 struct qedf_ctx *qedf = cmgr->qedf;
106 u16 min_xid = QEDF_MIN_XID;
107 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
110 struct qedf_ioreq *io_req;
112 num_ios = max_xid - min_xid + 1;
114 /* Free fcoe_bdt_ctx structures */
115 if (!cmgr->io_bdt_pool)
118 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
119 for (i = 0; i < num_ios; i++) {
120 bdt_info = cmgr->io_bdt_pool[i];
121 if (bdt_info->bd_tbl) {
122 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
123 bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
124 bdt_info->bd_tbl = NULL;
128 /* Destroy io_bdt pool */
129 for (i = 0; i < num_ios; i++) {
130 kfree(cmgr->io_bdt_pool[i]);
131 cmgr->io_bdt_pool[i] = NULL;
134 kfree(cmgr->io_bdt_pool);
135 cmgr->io_bdt_pool = NULL;
139 for (i = 0; i < num_ios; i++) {
140 io_req = &cmgr->cmds[i];
141 kfree(io_req->sgl_task_params);
142 kfree(io_req->task_params);
143 /* Make sure we free per command sense buffer */
144 if (io_req->sense_buffer)
145 dma_free_coherent(&qedf->pdev->dev,
146 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
147 io_req->sense_buffer_dma);
148 cancel_delayed_work_sync(&io_req->rrq_work);
151 /* Free command manager itself */
155 static void qedf_handle_rrq(struct work_struct *work)
157 struct qedf_ioreq *io_req =
158 container_of(work, struct qedf_ioreq, rrq_work.work);
160 qedf_send_rrq(io_req);
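/*
 * Allocate the command manager and its pool of qedf_ioreq structures, one
 * per exchange ID. Sense buffers, task parameters, SGL task parameters and
 * BD tables are all pre-allocated here so the I/O path does not need to
 * allocate DMA memory.
 */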
164 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
166 struct qedf_cmd_mgr *cmgr;
167 struct io_bdt *bdt_info;
168 struct qedf_ioreq *io_req;
172 u16 min_xid = QEDF_MIN_XID;
173 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
175 /* Make sure num_queues is already set before calling this function */
176 if (!qedf->num_queues) {
177 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
181 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
182 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
183 "max_xid 0x%x.\n", min_xid, max_xid);
187 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
188 "0x%x.\n", min_xid, max_xid);
190 num_ios = max_xid - min_xid + 1;
192 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
194 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
199 spin_lock_init(&cmgr->lock);
202 * Initialize I/O request fields.
206 for (i = 0; i < num_ios; i++) {
207 io_req = &cmgr->cmds[i];
208 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
212 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
214 /* Allocate DMA memory to hold sense buffer */
215 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
216 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
218 if (!io_req->sense_buffer)
221 /* Allocate task parameters to pass to f/w init functions */
222 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
224 if (!io_req->task_params) {
225 QEDF_ERR(&(qedf->dbg_ctx),
226 "Failed to allocate task_params for xid=0x%x\n",
232 * Allocate scatter/gather list info to pass to f/w init functions.
235 io_req->sgl_task_params = kzalloc(
236 sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
237 if (!io_req->sgl_task_params) {
238 QEDF_ERR(&(qedf->dbg_ctx),
239 "Failed to allocate sgl_task_params for xid=0x%x\n",
245 /* Allocate pool of io_bdts - one for each qedf_ioreq */
246 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
249 if (!cmgr->io_bdt_pool) {
250 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
254 for (i = 0; i < num_ios; i++) {
255 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
257 if (!cmgr->io_bdt_pool[i]) {
258 QEDF_WARN(&(qedf->dbg_ctx),
259 "Failed to alloc io_bdt_pool[%d].\n", i);
264 for (i = 0; i < num_ios; i++) {
265 bdt_info = cmgr->io_bdt_pool[i];
266 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
267 QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
268 &bdt_info->bd_tbl_dma, GFP_KERNEL);
269 if (!bdt_info->bd_tbl) {
270 QEDF_WARN(&(qedf->dbg_ctx),
271 "Failed to alloc bdt_tbl[%d].\n", i);
275 atomic_set(&cmgr->free_list_cnt, num_ios);
276 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
277 "cmgr->free_list_cnt=%d.\n",
278 atomic_read(&cmgr->free_list_cnt));
283 qedf_cmd_mgr_free(cmgr);
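/*
 * Allocate a free qedf_ioreq for the given fcport, enforcing the
 * per-connection active I/O limit and the global reserved-task limit.
 * Returns NULL (and counts an alloc failure) if nothing is available.
 */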
287 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
289 struct qedf_ctx *qedf = fcport->qedf;
290 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
291 struct qedf_ioreq *io_req = NULL;
292 struct io_bdt *bd_tbl;
298 free_sqes = atomic_read(&fcport->free_sqes);
301 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
302 "Returning NULL, free_sqes=%d.\n ",
307 /* Limit the number of outstanding R/W tasks */
308 if ((atomic_read(&fcport->num_active_ios) >=
309 NUM_RW_TASKS_PER_CONNECTION)) {
310 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
311 "Returning NULL, num_active_ios=%d.\n",
312 atomic_read(&fcport->num_active_ios));
316 /* Limit global TIDs for certain tasks */
317 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
318 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
319 "Returning NULL, free_list_cnt=%d.\n",
320 atomic_read(&cmd_mgr->free_list_cnt));
324 spin_lock_irqsave(&cmd_mgr->lock, flags);
325 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
326 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
328 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
331 /* Check to make sure command was previously freed */
332 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
336 if (i == FCOE_PARAMS_NUM_TASKS) {
337 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
341 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
342 spin_unlock_irqrestore(&cmd_mgr->lock, flags);
344 atomic_inc(&fcport->num_active_ios);
345 atomic_dec(&fcport->free_sqes);
347 atomic_dec(&cmd_mgr->free_list_cnt);
349 io_req->cmd_mgr = cmd_mgr;
350 io_req->fcport = fcport;
352 /* Hold the io_req against deletion */
353 kref_init(&io_req->refcount);
355 /* Bind io_bdt for this io_req */
356 /* Have a static link between io_req and io_bdt_pool */
357 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
358 if (bd_tbl == NULL) {
359 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
360 kref_put(&io_req->refcount, qedf_release_cmd);
363 bd_tbl->io_req = io_req;
364 io_req->cmd_type = cmd_type;
365 io_req->tm_flags = 0;
367 /* Reset sequence offset data */
368 io_req->rx_buf_off = 0;
369 io_req->tx_buf_off = 0;
370 io_req->rx_id = 0xffff; /* No RX_ID */
375 /* Record failure for stats and return NULL to caller */
376 qedf->alloc_failures++;
380 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
382 struct qedf_mp_req *mp_req = &(io_req->mp_req);
383 struct qedf_ctx *qedf = io_req->fcport->qedf;
384 uint64_t sz = sizeof(struct scsi_sge);
387 if (mp_req->mp_req_bd) {
388 dma_free_coherent(&qedf->pdev->dev, sz,
389 mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
390 mp_req->mp_req_bd = NULL;
392 if (mp_req->mp_resp_bd) {
393 dma_free_coherent(&qedf->pdev->dev, sz,
394 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
395 mp_req->mp_resp_bd = NULL;
397 if (mp_req->req_buf) {
398 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
399 mp_req->req_buf, mp_req->req_buf_dma);
400 mp_req->req_buf = NULL;
402 if (mp_req->resp_buf) {
403 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
404 mp_req->resp_buf, mp_req->resp_buf_dma);
405 mp_req->resp_buf = NULL;
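/*
 * kref release callback for a qedf_ioreq. Callers drop their reference
 * with, for example:
 *
 *	kref_put(&io_req->refcount, qedf_release_cmd);
 *
 * which frees middle-path resources (for ELS/TMF commands) and returns
 * the command to the free pool.
 */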
409 void qedf_release_cmd(struct kref *ref)
411 struct qedf_ioreq *io_req =
412 container_of(ref, struct qedf_ioreq, refcount);
413 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
414 struct qedf_rport *fcport = io_req->fcport;
416 if (io_req->cmd_type == QEDF_ELS ||
417 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
418 qedf_free_mp_resc(io_req);
420 atomic_inc(&cmd_mgr->free_list_cnt);
421 atomic_dec(&fcport->num_active_ios);
422 if (atomic_read(&fcport->num_active_ios) < 0)
423 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
425 /* Increment task retry identifier now that the request is released */
426 io_req->task_retry_identifier++;
428 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
431 static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
434 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
435 int frag_size, sg_frags;
439 if (sg_len > QEDF_BD_SPLIT_SZ)
440 frag_size = QEDF_BD_SPLIT_SZ;
443 bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
444 bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
445 bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
447 addr += (u64)frag_size;
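/*
 * Map the SCSI command's scatter/gather list into the firmware BD table.
 * A single small SGE is sent as a cached SGL; elements that are not page
 * aligned force the slow path via io_req->use_slowpath, and oversized
 * elements are split by qedf_split_bd(). Returns the number of BDs used.
 */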
454 static int qedf_map_sg(struct qedf_ioreq *io_req)
456 struct scsi_cmnd *sc = io_req->sc_cmd;
457 struct Scsi_Host *host = sc->device->host;
458 struct fc_lport *lport = shost_priv(host);
459 struct qedf_ctx *qedf = lport_priv(lport);
460 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
461 struct scatterlist *sg;
470 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
471 scsi_sg_count(sc), sc->sc_data_direction);
473 sg = scsi_sglist(sc);
476 * New condition to send single SGE as cached-SGL with length less than
477 * or equal to QEDF_MAX_SGLEN_FOR_CACHESGL.
479 if ((sg_count == 1) && (sg_dma_len(sg) <=
480 QEDF_MAX_SGLEN_FOR_CACHESGL)) {
481 sg_len = sg_dma_len(sg);
482 addr = (u64)sg_dma_address(sg);
484 bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
485 bd[bd_count].sge_addr.hi = (addr >> 32);
486 bd[bd_count].sge_len = (u16)sg_len;
491 scsi_for_each_sg(sc, sg, sg_count, i) {
492 sg_len = sg_dma_len(sg);
493 addr = (u64)sg_dma_address(sg);
494 end_addr = (u64)(addr + sg_len);
497 * First s/g element in the list so check if the end_addr
498 * is page aligned. Also check to make sure the length is
499 * at least page size.
501 if ((i == 0) && (sg_count > 1) &&
502 ((end_addr % QEDF_PAGE_SIZE) ||
503 sg_len < QEDF_PAGE_SIZE))
504 io_req->use_slowpath = true;
506 * Last s/g element so check if the start address is page aligned.
509 else if ((i == (sg_count - 1)) && (sg_count > 1) &&
510 (addr % QEDF_PAGE_SIZE))
511 io_req->use_slowpath = true;
513 * Intermediate s/g element so check if the start and end addresses are page aligned.
516 else if ((i != 0) && (i != (sg_count - 1)) &&
517 ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
518 io_req->use_slowpath = true;
520 if (sg_len > QEDF_MAX_BD_LEN) {
521 sg_frags = qedf_split_bd(io_req, addr, sg_len,
525 bd[bd_count].sge_addr.lo = U64_LO(addr);
526 bd[bd_count].sge_addr.hi = U64_HI(addr);
527 bd[bd_count].sge_len = (uint16_t)sg_len;
530 bd_count += sg_frags;
531 byte_count += sg_len;
534 if (byte_count != scsi_bufflen(sc))
535 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
536 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
537 scsi_bufflen(sc), io_req->xid);
542 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
544 struct scsi_cmnd *sc = io_req->sc_cmd;
545 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
548 if (scsi_sg_count(sc)) {
549 bd_count = qedf_map_sg(io_req);
554 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
557 io_req->bd_tbl->bd_valid = bd_count;
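/*
 * Build the 32-byte FCP_CMND IU (LUN, task attributes and flags, CDB and
 * FCP data length) from the SCSI command and io_req state.
 */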
562 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
563 struct fcp_cmnd *fcp_cmnd)
565 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
567 /* fcp_cmnd is 32 bytes */
568 memset(fcp_cmnd, 0, FCP_CMND_LEN);
570 /* 8 bytes: SCSI LUN info */
571 int_to_scsilun(sc_cmd->device->lun,
572 (struct scsi_lun *)&fcp_cmnd->fc_lun);
574 /* 4 bytes: flag info */
575 fcp_cmnd->fc_pri_ta = 0;
576 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
577 fcp_cmnd->fc_flags = io_req->io_req_flags;
578 fcp_cmnd->fc_cmdref = 0;
580 /* Populate data direction */
581 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
582 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
584 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
585 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
586 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
587 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
590 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
592 /* 16 bytes: CDB information */
593 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
594 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
596 /* 4 bytes: FCP data length */
597 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
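/*
 * Fill in the firmware task context and SQE for a read/write or task
 * management request: task parameters, SGL information, the sense buffer
 * address and the byte-swapped FCP_CMND IU.
 */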
600 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
601 struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
602 struct fcoe_wqe *sqe)
604 enum fcoe_task_type task_type;
605 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
606 struct io_bdt *bd_tbl = io_req->bd_tbl;
610 struct qedf_ctx *qedf = fcport->qedf;
611 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
612 struct regpair sense_data_buffer_phys_addr;
617 /* Note init_initiator_rw_fcoe_task memsets the task context */
618 io_req->task = task_ctx;
619 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
620 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
621 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
623 /* Set task type based on DMA direction of command */
624 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
625 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
627 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
628 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
629 tx_io_size = io_req->data_xfer_len;
631 task_type = FCOE_TASK_TYPE_READ_INITIATOR;
632 rx_io_size = io_req->data_xfer_len;
636 /* Setup the fields for fcoe_task_params */
637 io_req->task_params->context = task_ctx;
638 io_req->task_params->sqe = sqe;
639 io_req->task_params->task_type = task_type;
640 io_req->task_params->tx_io_size = tx_io_size;
641 io_req->task_params->rx_io_size = rx_io_size;
642 io_req->task_params->conn_cid = fcport->fw_cid;
643 io_req->task_params->itid = io_req->xid;
644 io_req->task_params->cq_rss_number = cq_idx;
645 io_req->task_params->is_tape_device = fcport->dev_type;
647 /* Fill in information for scatter/gather list */
648 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
649 bd_count = bd_tbl->bd_valid;
650 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
651 io_req->sgl_task_params->sgl_phys_addr.lo =
652 U64_LO(bd_tbl->bd_tbl_dma);
653 io_req->sgl_task_params->sgl_phys_addr.hi =
654 U64_HI(bd_tbl->bd_tbl_dma);
655 io_req->sgl_task_params->num_sges = bd_count;
656 io_req->sgl_task_params->total_buffer_size =
657 scsi_bufflen(io_req->sc_cmd);
658 io_req->sgl_task_params->small_mid_sge =
659 io_req->use_slowpath;
662 /* Fill in physical address of sense buffer */
663 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
664 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
666 /* fill FCP_CMND IU */
667 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
669 /* Swap fcp_cmnd since FC is big endian */
670 cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
671 for (i = 0; i < cnt; i++) {
672 tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
674 memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
676 init_initiator_rw_fcoe_task(io_req->task_params,
677 io_req->sgl_task_params,
678 sense_data_buffer_phys_addr,
679 io_req->task_retry_identifier, fcp_cmnd);
681 /* Increment SGL type counters */
683 qedf->single_sge_ios++;
684 io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
685 } else if (io_req->use_slowpath) {
686 qedf->slow_sge_ios++;
687 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
689 qedf->fast_sge_ios++;
690 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
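/*
 * Initialize the task context for a middle path (ELS/TMF) request using
 * the buffers set up by qedf_init_mp_req(). Middle path completions are
 * steered to CQ 0.
 */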
694 void qedf_init_mp_task(struct qedf_ioreq *io_req,
695 struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
697 struct qedf_mp_req *mp_req = &(io_req->mp_req);
698 struct qedf_rport *fcport = io_req->fcport;
699 struct qedf_ctx *qedf = io_req->fcport->qedf;
700 struct fc_frame_header *fc_hdr;
701 struct fcoe_tx_mid_path_params task_fc_hdr;
702 struct scsi_sgl_task_params tx_sgl_task_params;
703 struct scsi_sgl_task_params rx_sgl_task_params;
705 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
706 "Initializing MP task for cmd_type=%d\n",
709 qedf->control_requests++;
711 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
712 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
713 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
714 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
716 /* Setup the task from io_req for easy reference */
717 io_req->task = task_ctx;
719 /* Setup the fields for fcoe_task_params */
720 io_req->task_params->context = task_ctx;
721 io_req->task_params->sqe = sqe;
722 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
723 io_req->task_params->tx_io_size = io_req->data_xfer_len;
724 /* rx_io_size tells the f/w how large a response buffer we have */
725 io_req->task_params->rx_io_size = PAGE_SIZE;
726 io_req->task_params->conn_cid = fcport->fw_cid;
727 io_req->task_params->itid = io_req->xid;
728 /* Return middle path commands on CQ 0 */
729 io_req->task_params->cq_rss_number = 0;
730 io_req->task_params->is_tape_device = fcport->dev_type;
732 fc_hdr = &(mp_req->req_fc_hdr);
733 /* Set OX_ID and RX_ID based on driver task id */
734 fc_hdr->fh_ox_id = io_req->xid;
735 fc_hdr->fh_rx_id = htons(0xffff);
737 /* Set up FC header information */
738 task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
739 task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
740 task_fc_hdr.type = fc_hdr->fh_type;
741 task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
742 task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
743 task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
744 task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
746 /* Set up s/g list parameters for request buffer */
747 tx_sgl_task_params.sgl = mp_req->mp_req_bd;
748 tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
749 tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
750 tx_sgl_task_params.num_sges = 1;
751 /* Single SGE that covers the full request transfer length */
752 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
753 tx_sgl_task_params.small_mid_sge = 0;
755 /* Set up s/g list parameters for response buffer */
756 rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
757 rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
758 rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
759 rx_sgl_task_params.num_sges = 1;
760 /* The response buffer is a single page, so use PAGE_SIZE */
761 rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
762 rx_sgl_task_params.small_mid_sge = 0;
766 * Last arg is 0 since, as in the previous code, we do not request the
767 * FC header information.
769 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
772 &rx_sgl_task_params, 0);
774 /* Midpath requests always consume 1 SGE */
775 qedf->single_sge_ios++;
778 /* The caller is assumed to hold fcport->rport_lock */
779 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
781 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
784 rval = fcport->sq_prod_idx;
786 /* Adjust ring index */
787 fcport->sq_prod_idx++;
788 fcport->fw_sq_prod_idx++;
789 if (fcport->sq_prod_idx == total_sqe)
790 fcport->sq_prod_idx = 0;
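/*
 * Notify the firmware that new SQEs have been posted. The typical
 * submission sequence, used by qedf_post_io_req() and the abort/cleanup
 * paths below, is (with fcport->rport_lock held):
 *
 *	sqe_idx = qedf_get_sqe_idx(fcport);
 *	sqe = &fcport->sq[sqe_idx];
 *	memset(sqe, 0, sizeof(struct fcoe_wqe));
 *	... initialize the task ...
 *	qedf_ring_doorbell(fcport);
 */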
795 void qedf_ring_doorbell(struct qedf_rport *fcport)
797 struct fcoe_db_data dbell = { 0 };
801 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
802 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
803 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
804 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
806 dbell.sq_prod = fcport->fw_sq_prod_idx;
807 writel(*(u32 *)&dbell, fcport->p_doorbell);
808 /* Make sure SQ index is updated so f/w processes requests in order */
813 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
816 struct qedf_ctx *qedf = fcport->qedf;
817 struct qedf_io_log *io_log;
818 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
822 spin_lock_irqsave(&qedf->io_trace_lock, flags);
824 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
825 io_log->direction = direction;
826 io_log->task_id = io_req->xid;
827 io_log->port_id = fcport->rdata->ids.port_id;
828 io_log->lun = sc_cmd->device->lun;
829 io_log->op = op = sc_cmd->cmnd[0];
830 io_log->lba[0] = sc_cmd->cmnd[2];
831 io_log->lba[1] = sc_cmd->cmnd[3];
832 io_log->lba[2] = sc_cmd->cmnd[4];
833 io_log->lba[3] = sc_cmd->cmnd[5];
834 io_log->bufflen = scsi_bufflen(sc_cmd);
835 io_log->sg_count = scsi_sg_count(sc_cmd);
836 io_log->result = sc_cmd->result;
837 io_log->jiffies = jiffies;
838 io_log->refcount = kref_read(&io_req->refcount);
840 if (direction == QEDF_IO_TRACE_REQ) {
841 /* For requests we only care about the submission CPU */
842 io_log->req_cpu = io_req->cpu;
845 } else if (direction == QEDF_IO_TRACE_RSP) {
846 io_log->req_cpu = io_req->cpu;
847 io_log->int_cpu = io_req->int_cpu;
848 io_log->rsp_cpu = smp_processor_id();
851 io_log->sge_type = io_req->sge_type;
853 qedf->io_trace_idx++;
854 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
855 qedf->io_trace_idx = 0;
857 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
860 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
862 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
863 struct Scsi_Host *host = sc_cmd->device->host;
864 struct fc_lport *lport = shost_priv(host);
865 struct qedf_ctx *qedf = lport_priv(lport);
866 struct e4_fcoe_task_context *task_ctx;
868 enum fcoe_task_type req_type = 0;
869 struct fcoe_wqe *sqe;
872 /* Initialize rest of io_req fields */
873 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
874 sc_cmd->SCp.ptr = (char *)io_req;
875 io_req->use_slowpath = false; /* Assume fast SGL by default */
877 /* Record which cpu this request is associated with */
878 io_req->cpu = smp_processor_id();
880 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
881 req_type = FCOE_TASK_TYPE_READ_INITIATOR;
882 io_req->io_req_flags = QEDF_READ;
883 qedf->input_requests++;
884 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
885 req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
886 io_req->io_req_flags = QEDF_WRITE;
887 qedf->output_requests++;
889 io_req->io_req_flags = 0;
890 qedf->control_requests++;
895 /* Build buffer descriptor list for firmware from sg list */
896 if (qedf_build_bd_list_from_sg(io_req)) {
897 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
898 kref_put(&io_req->refcount, qedf_release_cmd);
902 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
903 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
904 kref_put(&io_req->refcount, qedf_release_cmd);
907 /* Obtain free SQE */
908 sqe_idx = qedf_get_sqe_idx(fcport);
909 sqe = &fcport->sq[sqe_idx];
910 memset(sqe, 0, sizeof(struct fcoe_wqe));
912 /* Get the task context */
913 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
915 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
917 kref_put(&io_req->refcount, qedf_release_cmd);
921 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
924 qedf_ring_doorbell(fcport);
926 if (qedf_io_tracing && io_req->sc_cmd)
927 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
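/*
 * .queuecommand entry point. Rejects commands while the driver is
 * unloading or the link/session is not ready, otherwise allocates a
 * qedf_ioreq and posts it to the firmware via qedf_post_io_req() under
 * fcport->rport_lock.
 */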
933 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
935 struct fc_lport *lport = shost_priv(host);
936 struct qedf_ctx *qedf = lport_priv(lport);
937 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
938 struct fc_rport_libfc_priv *rp = rport->dd_data;
939 struct qedf_rport *fcport;
940 struct qedf_ioreq *io_req;
943 unsigned long flags = 0;
946 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
947 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
948 sc_cmd->result = DID_NO_CONNECT << 16;
949 sc_cmd->scsi_done(sc_cmd);
953 if (!qedf->pdev->msix_enabled) {
954 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
955 "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
957 sc_cmd->result = DID_NO_CONNECT << 16;
958 sc_cmd->scsi_done(sc_cmd);
962 rval = fc_remote_port_chkready(rport);
964 sc_cmd->result = rval;
965 sc_cmd->scsi_done(sc_cmd);
969 /* Retry command if we are doing a qed drain operation */
970 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
971 rc = SCSI_MLQUEUE_HOST_BUSY;
975 if (lport->state != LPORT_ST_READY ||
976 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
977 rc = SCSI_MLQUEUE_HOST_BUSY;
981 /* rport and tgt are allocated together, so tgt should be non-NULL */
982 fcport = (struct qedf_rport *)&rp[1];
984 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
986 * Session is not offloaded yet. Let SCSI-ml retry the command.
989 rc = SCSI_MLQUEUE_TARGET_BUSY;
992 if (fcport->retry_delay_timestamp) {
993 if (time_after(jiffies, fcport->retry_delay_timestamp)) {
994 fcport->retry_delay_timestamp = 0;
996 /* If retry_delay timer is active, flow off the ML */
997 rc = SCSI_MLQUEUE_TARGET_BUSY;
1002 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1004 rc = SCSI_MLQUEUE_HOST_BUSY;
1008 io_req->sc_cmd = sc_cmd;
1010 /* Take fcport->rport_lock for posting to fcport send queue */
1011 spin_lock_irqsave(&fcport->rport_lock, flags);
1012 if (qedf_post_io_req(fcport, io_req)) {
1013 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1014 /* Return SQE to pool */
1015 atomic_inc(&fcport->free_sqes);
1016 rc = SCSI_MLQUEUE_HOST_BUSY;
1018 spin_unlock_irqrestore(&fcport->rport_lock, flags);
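/*
 * Parse the FCP_RSP IU from the completion CQE: residual count, SCSI
 * status, FCP response code and sense data (copied to the midlayer sense
 * buffer, truncated to SCSI_SENSE_BUFFERSIZE if needed).
 */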
1024 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1025 struct fcoe_cqe_rsp_info *fcp_rsp)
1027 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1028 struct qedf_ctx *qedf = io_req->fcport->qedf;
1029 u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1030 int fcp_sns_len = 0;
1031 int fcp_rsp_len = 0;
1032 uint8_t *rsp_info, *sense_data;
1034 io_req->fcp_status = FC_GOOD;
1035 io_req->fcp_resid = 0;
1036 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1037 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1038 io_req->fcp_resid = fcp_rsp->fcp_resid;
1040 io_req->scsi_comp_flags = rsp_flags;
1041 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1042 fcp_rsp->scsi_status_code;
1045 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1046 fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1049 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1050 fcp_sns_len = fcp_rsp->fcp_sns_len;
1052 io_req->fcp_rsp_len = fcp_rsp_len;
1053 io_req->fcp_sns_len = fcp_sns_len;
1054 rsp_info = sense_data = io_req->sense_buffer;
1056 /* fetch fcp_rsp_code */
1057 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1058 /* Only for task management function */
1059 io_req->fcp_rsp_code = rsp_info[3];
1060 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1061 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1062 /* Adjust sense-data location. */
1063 sense_data += fcp_rsp_len;
1066 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1067 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1068 "Truncating sense buffer\n");
1069 fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1072 /* The sense buffer can be NULL for TMF commands */
1073 if (sc_cmd->sense_buffer) {
1074 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1076 memcpy(sc_cmd->sense_buffer, sense_data,
1081 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1083 struct scsi_cmnd *sc = io_req->sc_cmd;
1085 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1086 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1087 scsi_sg_count(sc), sc->sc_data_direction);
1088 io_req->bd_tbl->bd_valid = 0;
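/*
 * Completion handler for normal SCSI I/O CQEs: parse the FCP_RSP, handle
 * firmware-detected underruns (aborting the command when necessary), map
 * the FCP status to a SCSI result and complete the command back to the
 * midlayer.
 */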
1092 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1093 struct qedf_ioreq *io_req)
1096 struct e4_fcoe_task_context *task_ctx;
1097 struct scsi_cmnd *sc_cmd;
1098 struct fcoe_cqe_rsp_info *fcp_rsp;
1099 struct qedf_rport *fcport;
1101 u16 scope, qualifier = 0;
1102 u8 fw_residual_flag = 0;
1110 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1111 sc_cmd = io_req->sc_cmd;
1112 fcp_rsp = &cqe->cqe_info.rsp_info;
1115 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1119 if (!sc_cmd->SCp.ptr) {
1120 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1121 "another context.\n");
1125 if (!sc_cmd->request) {
1126 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1127 "sc_cmd=%p.\n", sc_cmd);
1131 if (!sc_cmd->request->q) {
1132 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1133 "is not valid, sc_cmd=%p.\n", sc_cmd);
1137 fcport = io_req->fcport;
1139 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1141 qedf_unmap_sg_list(qedf, io_req);
1143 /* Check for FCP transport error */
1144 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1145 QEDF_ERR(&(qedf->dbg_ctx),
1146 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1147 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1148 io_req->fcp_rsp_code);
1149 sc_cmd->result = DID_BUS_BUSY << 16;
1153 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1154 FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1155 if (fw_residual_flag) {
1156 QEDF_ERR(&(qedf->dbg_ctx),
1157 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
1158 "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
1159 fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
1160 cqe->cqe_info.rsp_info.fw_residual);
1162 if (io_req->cdb_status == 0)
1163 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1165 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1167 /* Abort the command since we did not get all the data */
1168 init_completion(&io_req->abts_done);
1169 rval = qedf_initiate_abts(io_req, true);
1171 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1172 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1176 * Set resid to the whole buffer length so we won't try to reuse
1177 * any previously read data.
1179 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1183 switch (io_req->fcp_status) {
1185 if (io_req->cdb_status == 0) {
1186 /* Good I/O completion */
1187 sc_cmd->result = DID_OK << 16;
1189 refcount = kref_read(&io_req->refcount);
1190 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1191 "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1192 "lba=%02x%02x%02x%02x cdb_status=%d "
1193 "fcp_resid=0x%x refcount=%d.\n",
1194 qedf->lport->host->host_no, sc_cmd->device->id,
1195 sc_cmd->device->lun, io_req->xid,
1196 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1197 sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1198 io_req->cdb_status, io_req->fcp_resid,
1200 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1202 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1203 io_req->cdb_status == SAM_STAT_BUSY) {
1205 * Check whether we need to set retry_delay at
1206 * all based on retry_delay module parameter
1207 * and the status qualifier.
1211 scope = fcp_rsp->retry_delay_timer & 0xC000;
1213 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1215 if (qedf_retry_delay &&
1216 scope > 0 && qualifier > 0 &&
1217 qualifier <= 0x3FEF) {
1218 /* Check we don't go over the max */
1219 if (qualifier > QEDF_RETRY_DELAY_MAX)
1221 QEDF_RETRY_DELAY_MAX;
1222 fcport->retry_delay_timestamp =
1223 jiffies + (qualifier * HZ / 10);
1226 if (io_req->cdb_status ==
1227 SAM_STAT_TASK_SET_FULL)
1228 qedf->task_set_fulls++;
1233 if (io_req->fcp_resid)
1234 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1237 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1238 io_req->fcp_status);
1243 if (qedf_io_tracing)
1244 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1246 io_req->sc_cmd = NULL;
1247 sc_cmd->SCp.ptr = NULL;
1248 sc_cmd->scsi_done(sc_cmd);
1249 kref_put(&io_req->refcount, qedf_release_cmd);
1252 /* Return a SCSI command in some other context besides a normal completion */
1253 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1257 struct scsi_cmnd *sc_cmd;
1264 sc_cmd = io_req->sc_cmd;
1267 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1271 if (!sc_cmd->SCp.ptr) {
1272 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1273 "another context.\n");
1277 qedf_unmap_sg_list(qedf, io_req);
1279 sc_cmd->result = result << 16;
1280 refcount = kref_read(&io_req->refcount);
1281 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1282 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1283 "allowed=%d retries=%d refcount=%d.\n",
1284 qedf->lport->host->host_no, sc_cmd->device->id,
1285 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1286 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1287 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1291 * Set resid to the whole buffer length so we won't try to reuse any
1292 * previously read data.
1294 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1296 if (qedf_io_tracing)
1297 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1299 io_req->sc_cmd = NULL;
1300 sc_cmd->SCp.ptr = NULL;
1301 sc_cmd->scsi_done(sc_cmd);
1302 kref_put(&io_req->refcount, qedf_release_cmd);
1306 * Handle warning type CQE completions. This is mainly used for REC timer expiration.
1309 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1310 struct qedf_ioreq *io_req)
1313 struct qedf_rport *fcport = io_req->fcport;
1314 u64 err_warn_bit_map;
1320 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1321 "xid=0x%x\n", io_req->xid);
1322 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1323 "err_warn_bitmap=%08x:%08x\n",
1324 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1325 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1326 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1327 "rx_buff_off=%08x, rx_id=%04x\n",
1328 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1329 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1330 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1332 /* Normalize the error bitmap into a single 64-bit value */
1333 err_warn_bit_map = (u64)
1334 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1335 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1336 for (i = 0; i < 64; i++) {
1337 if (err_warn_bit_map & (u64)((u64)1 << i)) {
1343 /* Check if REC TOV expired if this is a tape device */
1344 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1346 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1347 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1348 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1349 io_req->rx_buf_off =
1350 cqe->cqe_info.err_info.rx_buf_off;
1351 io_req->tx_buf_off =
1352 cqe->cqe_info.err_info.tx_buf_off;
1353 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1354 rval = qedf_send_rec(io_req);
1356 * We only want to abort the io_req if we
1357 * can't queue the REC command as we want to
1358 * keep the exchange open for recovery.
1368 init_completion(&io_req->abts_done);
1369 rval = qedf_initiate_abts(io_req, true);
1371 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1374 /* Cleanup a command when we receive an error detection completion */
1375 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1376 struct qedf_ioreq *io_req)
1383 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1384 "xid=0x%x\n", io_req->xid);
1385 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1386 "err_warn_bitmap=%08x:%08x\n",
1387 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1388 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1389 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1390 "rx_buff_off=%08x, rx_id=%04x\n",
1391 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1392 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1393 le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1395 if (qedf->stop_io_on_error) {
1396 qedf_stop_all_io(qedf);
1400 init_completion(&io_req->abts_done);
1401 rval = qedf_initiate_abts(io_req, true);
1403 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1406 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1407 struct qedf_ioreq *els_req)
1409 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1410 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1411 kref_read(&els_req->refcount));
1414 * Need to distinguish this from a timeout when calling the
1417 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1419 /* Cancel the timer */
1420 cancel_delayed_work_sync(&els_req->timeout_work);
1422 /* Call callback function to complete command */
1423 if (els_req->cb_func && els_req->cb_arg) {
1424 els_req->cb_func(els_req->cb_arg);
1425 els_req->cb_arg = NULL;
1428 /* Release kref for original initiate_els */
1429 kref_put(&els_req->refcount, qedf_release_cmd);
1432 /* A value of -1 for lun is a wild card that means flush all
1433 * active SCSI I/Os for the target.
1435 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1437 struct qedf_ioreq *io_req;
1438 struct qedf_ctx *qedf;
1439 struct qedf_cmd_mgr *cmd_mgr;
1445 /* Check that fcport is still offloaded */
1446 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1447 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1451 qedf = fcport->qedf;
1452 cmd_mgr = qedf->cmd_mgr;
1454 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
1456 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1457 io_req = &cmd_mgr->cmds[i];
1461 if (io_req->fcport != fcport)
1463 if (io_req->cmd_type == QEDF_ELS) {
1464 rc = kref_get_unless_zero(&io_req->refcount);
1466 QEDF_ERR(&(qedf->dbg_ctx),
1467 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1468 io_req, io_req->xid);
1471 qedf_flush_els_req(qedf, io_req);
1473 * Release the kref and go back to the top of the loop.
1479 if (io_req->cmd_type == QEDF_ABTS) {
1480 rc = kref_get_unless_zero(&io_req->refcount);
1482 QEDF_ERR(&(qedf->dbg_ctx),
1483 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1484 io_req, io_req->xid);
1487 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1488 "Flushing abort xid=0x%x.\n", io_req->xid);
1490 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1492 if (io_req->sc_cmd) {
1493 if (io_req->return_scsi_cmd_on_abts)
1494 qedf_scsi_done(qedf, io_req, DID_ERROR);
1497 /* Notify eh_abort handler that ABTS is complete */
1498 complete(&io_req->abts_done);
1499 kref_put(&io_req->refcount, qedf_release_cmd);
1504 if (!io_req->sc_cmd)
1507 if (io_req->sc_cmd->device->lun !=
1513 * Use kref_get_unless_zero in the unlikely case the command
1514 * we're about to flush was completed in the normal SCSI path
1516 rc = kref_get_unless_zero(&io_req->refcount);
1518 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1519 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1522 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1523 "Cleanup xid=0x%x.\n", io_req->xid);
1525 /* Clean up the task and return the I/O to the mid-layer */
1526 qedf_initiate_cleanup(io_req, true);
1529 kref_put(&io_req->refcount, qedf_release_cmd);
1534 * Initiate an ABTS middle path command. Note that we don't have to initialize
1535 * the task context for an ABTS task.
1537 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1539 struct fc_lport *lport;
1540 struct qedf_rport *fcport = io_req->fcport;
1541 struct fc_rport_priv *rdata;
1542 struct qedf_ctx *qedf;
1546 unsigned long flags;
1547 struct fcoe_wqe *sqe;
1550 /* Sanity check qedf_rport before dereferencing any pointers */
1551 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1552 QEDF_ERR(NULL, "tgt not offloaded\n");
1557 rdata = fcport->rdata;
1558 r_a_tov = rdata->r_a_tov;
1559 qedf = fcport->qedf;
1560 lport = qedf->lport;
1562 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1563 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1568 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1569 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1574 /* Ensure room on SQ */
1575 if (!atomic_read(&fcport->free_sqes)) {
1576 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1581 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1582 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1587 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1588 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1589 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1590 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1591 "cleanup or abort processing or already "
1592 "completed.\n", io_req->xid);
1597 kref_get(&io_req->refcount);
1600 qedf->control_requests++;
1601 qedf->packet_aborts++;
1603 /* Set the return CPU to be the same as the request one */
1604 io_req->cpu = smp_processor_id();
1606 /* Set the command type to abort */
1607 io_req->cmd_type = QEDF_ABTS;
1608 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1610 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1611 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
1614 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
1616 spin_lock_irqsave(&fcport->rport_lock, flags);
1618 sqe_idx = qedf_get_sqe_idx(fcport);
1619 sqe = &fcport->sq[sqe_idx];
1620 memset(sqe, 0, sizeof(struct fcoe_wqe));
1621 io_req->task_params->sqe = sqe;
1623 init_initiator_abort_fcoe_task(io_req->task_params);
1624 qedf_ring_doorbell(fcport);
1626 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1631 * If the ABTS task fails to queue then we need to clean up the
1632 * task at the firmware.
1634 qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
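/*
 * Completion handler for an ABTS. On BA_ACC the exchange stays open and an
 * RRQ is scheduled after R_A_TOV; a BA_RJT or unknown response is just
 * logged. In either case the original command is completed back to the
 * midlayer if the caller requested it.
 */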
1639 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1640 struct qedf_ioreq *io_req)
1645 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1646 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1648 cancel_delayed_work(&io_req->timeout_work);
1651 r_ctl = cqe->cqe_info.abts_info.r_ctl;
1654 case FC_RCTL_BA_ACC:
1655 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1656 "ABTS response - ACC Send RRQ after R_A_TOV\n");
1657 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1659 * Don't release this cmd yet. It will be released
1660 * after we get the RRQ response.
1662 kref_get(&io_req->refcount);
1663 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1664 msecs_to_jiffies(qedf->lport->r_a_tov));
1666 /* For error cases let the cleanup return the command */
1667 case FC_RCTL_BA_RJT:
1668 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1669 "ABTS response - RJT\n");
1670 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1673 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1677 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1679 if (io_req->sc_cmd) {
1680 if (io_req->return_scsi_cmd_on_abts)
1681 qedf_scsi_done(qedf, io_req, DID_ERROR);
1684 /* Notify eh_abort handler that ABTS is complete */
1685 complete(&io_req->abts_done);
1687 kref_put(&io_req->refcount, qedf_release_cmd);
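/*
 * Allocate the DMA buffers and single-entry BD tables used by middle path
 * (ELS and task management) requests. Freed by qedf_free_mp_resc().
 */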
1690 int qedf_init_mp_req(struct qedf_ioreq *io_req)
1692 struct qedf_mp_req *mp_req;
1693 struct scsi_sge *mp_req_bd;
1694 struct scsi_sge *mp_resp_bd;
1695 struct qedf_ctx *qedf = io_req->fcport->qedf;
1699 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
1701 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
1702 memset(mp_req, 0, sizeof(struct qedf_mp_req));
1704 if (io_req->cmd_type != QEDF_ELS) {
1705 mp_req->req_len = sizeof(struct fcp_cmnd);
1706 io_req->data_xfer_len = mp_req->req_len;
1708 mp_req->req_len = io_req->data_xfer_len;
1710 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
1711 &mp_req->req_buf_dma, GFP_KERNEL);
1712 if (!mp_req->req_buf) {
1713 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
1714 qedf_free_mp_resc(io_req);
1718 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
1719 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
1720 if (!mp_req->resp_buf) {
1721 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
1723 qedf_free_mp_resc(io_req);
1727 /* Allocate and map mp_req_bd and mp_resp_bd */
1728 sz = sizeof(struct scsi_sge);
1729 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1730 &mp_req->mp_req_bd_dma, GFP_KERNEL);
1731 if (!mp_req->mp_req_bd) {
1732 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
1733 qedf_free_mp_resc(io_req);
1737 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1738 &mp_req->mp_resp_bd_dma, GFP_KERNEL);
1739 if (!mp_req->mp_resp_bd) {
1740 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
1741 qedf_free_mp_resc(io_req);
1746 addr = mp_req->req_buf_dma;
1747 mp_req_bd = mp_req->mp_req_bd;
1748 mp_req_bd->sge_addr.lo = U64_LO(addr);
1749 mp_req_bd->sge_addr.hi = U64_HI(addr);
1750 mp_req_bd->sge_len = QEDF_PAGE_SIZE;
1753 * MP buffer is either a task mgmt command or an ELS.
1754 * So the assumption is that it consumes a single bd
1755 * entry in the bd table
1757 mp_resp_bd = mp_req->mp_resp_bd;
1758 addr = mp_req->resp_buf_dma;
1759 mp_resp_bd->sge_addr.lo = U64_LO(addr);
1760 mp_resp_bd->sge_addr.hi = U64_HI(addr);
1761 mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
1767 * Last ditch effort to clear the port if it's stuck. Used only after a
1768 * cleanup task times out.
1770 static void qedf_drain_request(struct qedf_ctx *qedf)
1772 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1773 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
1777 /* Set bit to return all queuecommand requests as busy */
1778 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1780 /* Call qed drain request for function. Should be synchronous */
1781 qed_ops->common->drain(qedf->cdev);
1783 /* Settle time for CQEs to be returned */
1786 /* Unplug and continue */
1787 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1791 * Returns SUCCESS if the cleanup task does not time out, otherwise returns FAILED.
1794 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1795 bool return_scsi_cmd_on_abts)
1797 struct qedf_rport *fcport;
1798 struct qedf_ctx *qedf;
1800 struct e4_fcoe_task_context *task;
1803 unsigned long flags;
1804 struct fcoe_wqe *sqe;
1807 fcport = io_req->fcport;
1809 QEDF_ERR(NULL, "fcport is NULL.\n");
1813 /* Sanity check qedf_rport before dereferencing any pointers */
1814 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1815 QEDF_ERR(NULL, "tgt not offloaded\n");
1820 qedf = fcport->qedf;
1822 QEDF_ERR(NULL, "qedf is NULL.\n");
1826 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1827 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
1828 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1829 "cleanup processing or already completed.\n",
1834 /* Ensure room on SQ */
1835 if (!atomic_read(&fcport->free_sqes)) {
1836 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1841 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
1844 /* Cleanup cmds re-use the same TID as the original I/O */
1846 io_req->cmd_type = QEDF_CLEANUP;
1847 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1849 /* Set the return CPU to be the same as the request one */
1850 io_req->cpu = smp_processor_id();
1852 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1854 task = qedf_get_task_mem(&qedf->tasks, xid);
1856 init_completion(&io_req->tm_done);
1858 spin_lock_irqsave(&fcport->rport_lock, flags);
1860 sqe_idx = qedf_get_sqe_idx(fcport);
1861 sqe = &fcport->sq[sqe_idx];
1862 memset(sqe, 0, sizeof(struct fcoe_wqe));
1863 io_req->task_params->sqe = sqe;
1865 init_initiator_cleanup_fcoe_task(io_req->task_params);
1866 qedf_ring_doorbell(fcport);
1868 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1870 tmo = wait_for_completion_timeout(&io_req->tm_done,
1871 QEDF_CLEANUP_TIMEOUT * HZ);
1876 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
1877 "xid=%x.\n", io_req->xid);
1878 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1879 /* Issue a drain request if cleanup task times out */
1880 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
1881 qedf_drain_request(qedf);
1884 if (io_req->sc_cmd) {
1885 if (io_req->return_scsi_cmd_on_abts)
1886 qedf_scsi_done(qedf, io_req, DID_ERROR);
1890 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
1892 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
1897 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1898 struct qedf_ioreq *io_req)
1900 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
1903 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1905 /* Complete so we can finish cleaning up the I/O */
1906 complete(&io_req->tm_done);
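/*
 * Issue a task management function (for example a LUN or target reset),
 * wait for its completion and then flush any I/O still outstanding for
 * the affected LUN(s).
 */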
1909 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
1912 struct qedf_ioreq *io_req;
1913 struct e4_fcoe_task_context *task;
1914 struct qedf_ctx *qedf = fcport->qedf;
1915 struct fc_lport *lport = qedf->lport;
1919 unsigned long flags;
1920 struct fcoe_wqe *sqe;
1924 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
1928 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1929 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
1934 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
1935 "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
1937 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
1939 QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
1944 if (tm_flags == FCP_TMF_LUN_RESET)
1946 else if (tm_flags == FCP_TMF_TGT_RESET)
1947 qedf->target_resets++;
1949 /* Initialize rest of io_req fields */
1950 io_req->sc_cmd = sc_cmd;
1951 io_req->fcport = fcport;
1952 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
1954 /* Set the return CPU to be the same as the request one */
1955 io_req->cpu = smp_processor_id();
1958 io_req->io_req_flags = QEDF_READ;
1959 io_req->data_xfer_len = 0;
1960 io_req->tm_flags = tm_flags;
1962 /* Default is to return a SCSI command when an error occurs */
1963 io_req->return_scsi_cmd_on_abts = true;
1965 /* Obtain exchange id */
1968 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
1971 /* Initialize task context for this IO request */
1972 task = qedf_get_task_mem(&qedf->tasks, xid);
1974 init_completion(&io_req->tm_done);
1976 spin_lock_irqsave(&fcport->rport_lock, flags);
1978 sqe_idx = qedf_get_sqe_idx(fcport);
1979 sqe = &fcport->sq[sqe_idx];
1980 memset(sqe, 0, sizeof(struct fcoe_wqe));
1982 qedf_init_task(fcport, lport, io_req, task, sqe);
1983 qedf_ring_doorbell(fcport);
1985 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1987 tmo = wait_for_completion_timeout(&io_req->tm_done,
1988 QEDF_TM_TIMEOUT * HZ);
1992 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
1994 /* Check TMF response code */
1995 if (io_req->fcp_rsp_code == 0)
2001 if (tm_flags == FCP_TMF_LUN_RESET)
2002 qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
2004 qedf_flush_active_ios(fcport, -1);
2006 kref_put(&io_req->refcount, qedf_release_cmd);
2008 if (rc != SUCCESS) {
2009 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2012 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2019 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2021 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2022 struct fc_rport_libfc_priv *rp = rport->dd_data;
2023 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2024 struct qedf_ctx *qedf;
2025 struct fc_lport *lport;
2029 rval = fc_remote_port_chkready(rport);
2032 QEDF_ERR(NULL, "device_reset rport not ready\n");
2037 if (fcport == NULL) {
2038 QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2043 qedf = fcport->qedf;
2044 lport = qedf->lport;
2046 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2047 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2052 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2053 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2058 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2064 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2065 struct qedf_ioreq *io_req)
2067 struct fcoe_cqe_rsp_info *fcp_rsp;
2069 fcp_rsp = &cqe->cqe_info.rsp_info;
2070 qedf_parse_fcp_rsp(io_req, fcp_rsp);
2072 io_req->sc_cmd = NULL;
2073 complete(&io_req->tm_done);
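/*
 * Handle an unsolicited frame received on the BDQ: copy it out of the BDQ
 * buffer into a newly allocated fc_frame, defer it to a workqueue so libfc
 * processes it in a non-atomic context, and advance the BDQ producer index
 * for the firmware.
 */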
2076 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2077 struct fcoe_cqe *cqe)
2079 unsigned long flags;
2081 uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2082 u32 payload_len, crc;
2083 struct fc_frame_header *fh;
2084 struct fc_frame *fp;
2085 struct qedf_io_work *io_work;
2088 struct scsi_bd *p_bd_info;
2090 p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2091 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2092 "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2093 le32_to_cpu(p_bd_info->address.hi),
2094 le32_to_cpu(p_bd_info->address.lo),
2095 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2096 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2097 qedf->bdq_prod_idx, pktlen);
2099 bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2100 if (bdq_idx >= QEDF_BDQ_SIZE) {
2101 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2103 goto increment_prod;
2106 bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2108 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2109 "unsolicited packet.\n");
2110 goto increment_prod;
2113 if (qedf_dump_frames) {
2114 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2115 "BDQ frame is at addr=%p.\n", bdq_addr);
2116 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2117 (void *)bdq_addr, pktlen, false);
2120 /* Allocate frame */
2121 payload_len = pktlen - sizeof(struct fc_frame_header);
2122 fp = fc_frame_alloc(qedf->lport, payload_len);
2124 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2125 goto increment_prod;
2128 /* Copy data from BDQ buffer into fc_frame struct */
2129 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2130 memcpy(fh, (void *)bdq_addr, pktlen);
2132 /* Initialize the frame so libfc sees it as a valid frame */
2133 crc = fcoe_fc_crc(fp);
2135 fr_dev(fp) = qedf->lport;
2136 fr_sof(fp) = FC_SOF_I3;
2137 fr_eof(fp) = FC_EOF_T;
2138 fr_crc(fp) = cpu_to_le32(~crc);
2141 * We need to return the frame back up to libfc in a non-atomic context.
2144 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2146 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2147 "work for I/O completion.\n");
2149 goto increment_prod;
2151 memset(io_work, 0, sizeof(struct qedf_io_work));
2153 INIT_WORK(&io_work->work, qedf_fp_io_handler);
2155 /* Copy contents of CQE for deferred processing */
2156 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2158 io_work->qedf = qedf;
2161 queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2163 spin_lock_irqsave(&qedf->hba_lock, flags);
2165 /* Increment producer to let f/w know we've handled the frame */
2166 qedf->bdq_prod_idx++;
2168 /* Producer index wraps at uint16_t boundary */
2169 if (qedf->bdq_prod_idx == 0xffff)
2170 qedf->bdq_prod_idx = 0;
2172 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2173 tmp = readw(qedf->bdq_primary_prod);
2174 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2175 tmp = readw(qedf->bdq_secondary_prod);
2177 spin_unlock_irqrestore(&qedf->hba_lock, flags);