/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
static const char *fcpio_status_str[] = {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};
const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
					   int tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
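/*
 * Worked example of the lock hashing above (a sketch; assumes
 * FNIC_IO_LOCKS is a power of two, e.g. 64):
 *
 *   tag = 5  -> 5  & 63 = 5 -> io_req_lock[5]
 *   tag = 69 -> 69 & 63 = 5 -> io_req_lock[5]   (shares a lock with tag 5)
 *   tag = 70 -> 70 & 63 = 6 -> io_req_lock[6]
 *
 * Multiple tags may therefore share a spinlock; the hash only bounds the
 * number of locks, it does not give per-command exclusivity.
 */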
/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 DMA_TO_DEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 0;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}
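/*
 * Worked example of the wraparound accounting above (illustrative
 * numbers only): with desc_count = 64, to_clean_index = 60 and
 * fw_ack_index = 3, the ack index has wrapped past the end of the ring,
 * so
 *
 *   desc_avail += 64 - 60 + 3 + 1 = 8
 *
 * i.e. descriptors 60..63 and 0..3 are reclaimed, and to_clean_index
 * becomes (3 + 1) % 64 = 4.
 */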
/*
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 */
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
		       unsigned long clearbits)
{
	struct Scsi_Host *host = fnic->lport->host;
	int sh_locked = spin_is_locked(host->host_lock);
	unsigned long flags = 0;

	if (!sh_locked)
		spin_lock_irqsave(host->host_lock, flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	if (!sh_locked)
		spin_unlock_irqrestore(host->host_lock, flags);
}
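/*
 * Callers normally go through the fnic_set_state_flags() and
 * fnic_clear_state_flags() wrappers (defined in fnic.h), which pass
 * clearbits = 0 or 1 respectively, rather than invoking
 * __fnic_set_state_flags() directly; see their use in
 * fnic_fw_reset_handler() below.
 */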
/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else {
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				atomic64_read(
				  &fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}
/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	if (fnic->ctlr.map_dest) {
		memset(gw_mac, 0xff, ETH_ALEN);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}
/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
				io_req->sgl_list,
				sizeof(io_req->sgl_list[0]) * sg_count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
			printk(KERN_ERR "DMA mapping failed\n");
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}

	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
			sizeof(io_req->sgl_list[0]) * sg_count,
			DMA_TO_DEVICE);
		printk(KERN_ERR "DMA mapping failed\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	exch_flags = 0;
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
						/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}
/*
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct vnic_wq_copy *wq;
	int ret;
	u64 cmd_trace;
	int sg_count = 0;
	unsigned long flags = 0;
	unsigned long ptr;
	spinlock_t *io_lock = NULL;
	int io_lock_acquired = 0;
	struct fc_rport_libfc_priv *rp;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
		return SCSI_MLQUEUE_HOST_BUSY;

	rport = starget_to_rport(scsi_target(sc->device));
	if (!rport) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "returning DID_NO_CONNECT for IO as rport is NULL\n");
		sc->result = DID_NO_CONNECT << 16;
		done(sc);
		return 0;
	}

	ret = fc_remote_port_chkready(rport);
	if (ret) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport is not ready\n");
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = ret;
		done(sc);
		return 0;
	}

	rp = rport->dd_data;
	if (!rp || rp->rp_state == RPORT_ST_DELETE) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport 0x%x removed, returning DID_NO_CONNECT\n",
			      rport->port_id);

		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = DID_NO_CONNECT << 16;
		done(sc);
		return 0;
	}

	if (rp->rp_state != RPORT_ST_READY) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
			      rport->port_id, rp->rp_state);

		sc->result = DID_IMM_RETRY << 16;
		done(sc);
		return 0;
	}

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	atomic_inc(&fnic->in_flight);

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);
	CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			   sc->request->tag, sc, 0, sc->cmnd[0],
			   sg_count, CMD_STATE(sc));
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}
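	/*
	 * Worked example of the alignment fix-up above (a sketch; assumes
	 * FNIC_SG_DESC_ALIGN is a power of two, e.g. 16): for an allocation
	 * returned at ptr = 0x1008,
	 *
	 *   (0x1008 + 16 - 1) & ~(16 - 1) = 0x1017 & ~0xf = 0x1010
	 *
	 * so sgl_list is rounded up to the next 16-byte boundary, while
	 * sgl_list_alloc keeps the original pointer for mempool_free().
	 */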
	/*
	 * Will acquire lock before setting to IO initialized.
	 */

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* initialize rest of io_req */
	io_lock_acquired = 1;
	io_req->port_id = rport->port_id;
	io_req->start_time = jiffies;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			   sc->request->tag, sc, 0, 0, 0,
			   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
		atomic_dec(&fnic->in_flight);
		/* acquire host lock before returning to SCSI */
		spin_lock(lp->host->host_lock);
		return ret;
	} else {
		atomic64_inc(&fnic_stats->io_stats.active_ios);
		atomic64_inc(&fnic_stats->io_stats.num_ios);
		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
		    atomic64_read(&fnic_stats->io_stats.max_active_ios))
			atomic64_set(&fnic_stats->io_stats.max_active_ios,
				     atomic64_read(&fnic_stats->io_stats.active_ios));

		/* REVISIT: Use per IO lock in the final code */
		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
	}
out:
	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
			(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
			(u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
			sc->cmnd[5]);

	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
		   sc->request->tag, sc, io_req,
		   sg_count, cmd_trace,
		   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* the io lock is held only if this path issued the IO */
	if (io_lock_acquired)
		spin_unlock_irqrestore(io_lock, flags);

	atomic_dec(&fnic->in_flight);
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}
DEF_SCSI_QCMD(fnic_queuecommand)
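/*
 * DEF_SCSI_QCMD() (from <scsi/scsi_cmnd.h>) generates the actual
 * fnic_queuecommand() entry point: a wrapper that takes the Scsi_Host
 * lock with interrupts disabled and then calls fnic_queuecommand_lck(),
 * matching the locking contract documented above.
 */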
/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
	atomic64_set(&fnic->io_cmpl_skip, 0);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			atomic64_inc(&reset_stats->fw_reset_failures);
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}
/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flogi reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}
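/*
 * Worked example of the range check above (illustrative numbers): with
 * to_clean_index = 10 and to_use_index = 20 (no wrap), only request_out
 * values 10..19 are accepted. With to_clean_index = 60 and
 * to_use_index = 4 (wrapped ring), values 60..63 and 0..3 are accepted
 * and anything in 4..59 is rejected as stale.
 */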
/*
 * Mark that an ack was received and store the ack index. If multiple
 * acks are received before the Tx thread cleans up, the latest value
 * is used, which is the correct behavior. This state should be in the
 * copy Wq instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;
	u64 *ox_id_tag = (u64 *)(void *)desc;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	} else
		atomic64_inc(
			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	FNIC_TRACE(fnic_fcpio_ack_handler,
		   fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
		   ox_id_tag[4], ox_id_tag[5]);
}
/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					  struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	u64 cmd_trace;
	unsigned long start_time;
	unsigned long io_duration_time;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;

	if (id >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "icmnd_cmpl sc is null - "
			     "hdr status = %s tag = 0x%x desc = 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, desc);
		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
			   fnic->lport->host->host_no, id,
			   ((u64)icmnd_cmpl->_resvd0[1] << 16 |
			   (u64)icmnd_cmpl->_resvd0[0]),
			   ((u64)hdr_status << 16 |
			   (u64)icmnd_cmpl->scsi_status << 8 |
			   (u64)icmnd_cmpl->flags), desc,
			   (u64)icmnd_cmpl->residual, 0);
		return;
	}

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "icmnd_cmpl io_req is null - "
			     "hdr status = %s tag = 0x%x sc 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * set completion of the IO. The abts path will clean it up
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {

		/*
		 * set the FNIC_IO_DONE so that this doesn't get
		 * flagged as 'out of order' if it was not aborted
		 */
		CMD_FLAGS(sc) |= FNIC_IO_DONE;
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
		spin_unlock_irqrestore(io_lock, flags);
		if (hdr_status == FCPIO_ABORTED)
			CMD_FLAGS(sc) |= FNIC_IO_ABORTED;

		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "icmnd_cmpl abts pending "
			      "hdr status = %s tag = 0x%x sc = 0x%p "
			      "scsi_status = %x residual = %d\n",
			      fnic_fcpio_status_to_str(hdr_status),
			      id, sc,
			      icmnd_cmpl->scsi_status,
			      icmnd_cmpl->residual);
		return;
	}

	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);
		scsi_set_resid(sc, icmnd_cmpl->residual);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
			xfer_len -= icmnd_cmpl->residual;

		if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
			atomic64_inc(&fnic_stats->misc_stats.check_condition);

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
		atomic64_inc(&fnic_stats->io_stats.io_not_found);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_FW_ERR:           /* request was terminated due fw error */
		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	default:
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= FNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	if (hdr_status != FCPIO_SUCCESS) {
		atomic64_inc(&fnic_stats->io_stats.io_failures);
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
	}

	fnic_release_ioreq_buf(fnic, io_req, sc);

	mempool_free(io_req, fnic->io_req_pool);

	cmd_trace = ((u64)hdr_status << 56) |
		    (u64)icmnd_cmpl->scsi_status << 48 |
		    (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
		    (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		    (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
		   sc->device->host->host_no, id, sc,
		   ((u64)icmnd_cmpl->_resvd0[1] << 56 |
		   (u64)icmnd_cmpl->_resvd0[0] << 48 |
		   jiffies_to_msecs(jiffies - start_time)),
		   desc, cmd_trace,
		   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	/* use the start time saved before io_req was freed above */
	io_duration_time = jiffies_to_msecs(jiffies) -
			   jiffies_to_msecs(start_time);

	if (io_duration_time <= 10)
		atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
	else if (io_duration_time <= 100)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
	else if (io_duration_time <= 500)
		atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
	else if (io_duration_time <= 5000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
	else if (io_duration_time <= 10000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
	else if (io_duration_time <= 30000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
	else
		atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);

	if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
		atomic64_set(&fnic_stats->io_stats.current_max_io_time,
			     io_duration_time);
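	/*
	 * The latency buckets and current_max_io_time updated above feed
	 * the driver's fnic_stats reporting (exposed through the fnic
	 * debugfs entries, assuming debugfs support is enabled in the
	 * build); they are not consumed anywhere else in the IO path.
	 */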
	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}
/*
 * fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			     "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			     fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			     "itmf_cmpl io_req is null - "
			     "hdr status = %s tag = 0x%x sc 0x%p\n",
			     fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_ITMF_REJECTED:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "abort reject recd. id %d\n",
				      (int)(id & FNIC_TAG_MASK));
			break;
		case FCPIO_IO_NOT_FOUND:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}

		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
		CMD_ABTS_STATUS(sc) = hdr_status;

		/* If the status is IO not found consider it as success */
		if (hdr_status == FCPIO_IO_NOT_FOUND)
			CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;

		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If the scsi_eh thread is blocked waiting for the abts to
		 * complete, signal completion to it; the IO will be cleaned
		 * up in that thread. Else clean it up in this context.
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done) {
				FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
					   sc->device->host->host_no, id,
					   sc,
					   jiffies_to_msecs(jiffies - start_time),
					   desc,
					   (((u64)hdr_status << 40) |
					   (u64)sc->cmnd[0] << 32 |
					   (u64)sc->cmnd[2] << 24 |
					   (u64)sc->cmnd[3] << 16 |
					   (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
					   (((u64)CMD_FLAGS(sc) << 32) |
					   CMD_STATE(sc)));
				sc->scsi_done(sc);
				atomic64_dec(&fnic_stats->io_stats.active_ios);
				if (atomic64_read(&fnic->io_cmpl_skip))
					atomic64_dec(&fnic->io_cmpl_skip);
				else
					atomic64_inc(&fnic_stats->io_stats.io_completions);
			}
		}

	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc, 0,
				   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "Terminate pending "
				      "dev reset cmpl recd. id %d status %s\n",
				      (int)(id & FNIC_TAG_MASK),
				      fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				   sc->device->host->host_no, id, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   desc, 0,
				   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "dev reset cmpl recd after time out. "
				      "id %d status %s\n",
				      (int)(id & FNIC_TAG_MASK),
				      fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}
}
/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	switch (desc->hdr.type) {
	case FCPIO_ICMND_CMPL: /* fw completed a command */
	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
	case FCPIO_RESET_CMPL: /* fw completed reset */
		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		break;
	default:
		break;
	}

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return 0;
}
/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		/* copy WQ CQs follow the raw WQ and RQ CQs in fnic->cq[] */
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		wq_work_done += cur_work_done;
	}
	return wq_work_done;
}
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
	int i;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
		if (i == exclude_id)
			continue;

		io_lock = fnic_io_lock_tag(fnic, i);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
		    !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
			/*
			 * We will be here only when FW completes reset
			 * without sending completions for outstanding ios.
			 */
			CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
			if (io_req && io_req->dr_done)
				complete(io_req->dr_done);
			else if (io_req && io_req->abts_done)
				complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto cleanup_scsi_cmd;
		}

		CMD_SP(sc) = NULL;

		spin_unlock_irqrestore(io_lock, flags);

		/*
		 * If there is a scsi_cmnd associated with this io_req, then
		 * free the corresponding state
		 */
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
			      __func__, (jiffies - start_time));

		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);

		/* Complete the command to SCSI */
		if (sc->scsi_done) {
			FNIC_TRACE(fnic_cleanup_io,
				   sc->device->host->host_no, i, sc,
				   jiffies_to_msecs(jiffies - start_time),
				   0, ((u64)sc->cmnd[0] << 32 |
				   (u64)sc->cmnd[2] << 24 |
				   (u64)sc->cmnd[3] << 16 |
				   (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

			sc->scsi_done(sc);
		}
	}
}
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time = 0;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= fnic->fnic_max_tag_id)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	if (sc->scsi_done) {
		FNIC_TRACE(fnic_wq_copy_cleanup_handler,
			   sc->device->host->host_no, id, sc,
			   jiffies_to_msecs(jiffies - start_time),
			   0, ((u64)sc->cmnd[0] << 32 |
			   (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
			   (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
			   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

		sc->scsi_done(sc);
	}
}
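/*
 * Note on tag encoding: the driver multiplexes abort/terminate and
 * device-reset requests onto the regular SCSI tag space by setting the
 * FNIC_TAG_ABORT / FNIC_TAG_DEV_RST bits above the tag value (see
 * fnic_queue_abort_io_req() and fnic_queue_dr_io_req() below); the
 * completion handlers mask them off again with FNIC_TAG_MASK to recover
 * the original block-layer tag.
 */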
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || io_req->port_id != port_id) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
		    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
				      sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_rport_exch_reset: io_req->abts_done is set "
				     "state is %s\n",
				     fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}

		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "rport_exch_reset "
				     "IO not yet issued %p tag 0x%x flags "
				     "%x state %d\n",
				     sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "fnic_rport_exch_reset dev rst sc 0x%p\n",
				      sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_reset_exch: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);
}
void fnic_terminate_rport_io(struct fc_rport *rport)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct fc_rport_libfc_priv *rdata;
	struct fc_lport *lport;
	struct fnic *fnic;
	struct fc_rport *cmd_rport;
	struct reset_stats *reset_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;

	if (!rport) {
		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
		return;
	}
	rdata = rport->dd_data;

	if (!rdata) {
		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
		return;
	}
	lport = rdata->local_port;

	if (!lport) {
		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
		return;
	}
	fnic = lport_priv(lport);
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
		      rport->port_name, rport->node_name, rport,
		      rport->port_id);

	if (fnic->in_remove)
		return;

	reset_stats = &fnic->fnic_stats.reset_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		cmd_rport = starget_to_rport(scsi_target(sc->device));
		if (rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
		    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
				      sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_terminate_rport_io: io_req->abts_done is set "
				     "state is %s\n",
				     fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}
		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "fnic_terminate_rport_io "
				      "IO not yet issued %p tag 0x%x flags "
				      "%x state %d\n",
				      sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "fnic_terminate_rport_io: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);
}
/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by an io_req in the driver.
 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	int ret = SUCCESS;
	u32 task_req = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct abort_stats *abts_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;
	int tag;
	unsigned long abt_issued_time;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	abts_stats = &fnic->fnic_stats.abts_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	rport = starget_to_rport(scsi_target(sc->device));
	tag = sc->request->tag;
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
		      rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));

	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happened, the completion won't actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * The CMD_SP will not be cleared except while holding io_req_lock.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wait_pending;
	}

	abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
	if (abt_issued_time <= 6000)
		atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
	else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
		atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
	else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
		atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
	else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
		atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
	else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
		atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
	else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
		atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
	else
		atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);

	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
		      "CDB Opcode: %02x Abort issued time: %lu msec\n",
		      sc->cmnd[0], abt_issued_time);
	/*
	 * Command is still pending, need to abort it
	 * If the firmware completes the command after this point,
	 * the completion won't be done till mid-layer, since abort
	 * has already started.
	 */
	old_ioreq_state = CMD_STATE(sc);
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Check readiness of the remote port. If the path to remote
	 * port is up, then send abts to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware
	 */
	if (fc_remote_port_chkready(rport) == 0)
		task_req = FCPIO_ITMF_ABT_TASK;
	else {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		task_req = FCPIO_ITMF_ABT_TASK_TERM;
	}

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
				    fc_lun.scsi_lun, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = old_ioreq_state;
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	if (task_req == FCPIO_ITMF_ABT_TASK) {
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
		atomic64_inc(&fnic_stats->abts_stats.aborts);
	} else {
		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
		atomic64_inc(&fnic_stats->term_stats.terminates);
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
 wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));

	/* Check the abort status */
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	io_req->abts_done = NULL;

	/* fw did not complete abort, timed out */
	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		if (task_req == FCPIO_ITMF_ABT_TASK) {
			atomic64_inc(&abts_stats->abort_drv_timeouts);
		} else {
			atomic64_inc(&term_stats->terminate_drv_timeouts);
		}
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/* IO out of order */

	if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issuing Host reset due to out of order IO\n");

		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

	start_time = io_req->start_time;
	/*
	 * firmware completed the abort, check the status,
	 * free the io_req if successful. If abort fails,
	 * Device reset will clean the I/O.
	 */
	if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
		CMD_SP(sc) = NULL;
	else {
		ret = FAILED;
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	if (sc->scsi_done) {
		/* Call SCSI completion function to complete the IO */
		sc->result = (DID_ABORT << 16);
		sc->scsi_done(sc);
		atomic64_dec(&fnic_stats->io_stats.active_ios);
		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);
	}

fnic_abort_cmd_end:
	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
		   sc->request->tag, sc,
		   jiffies_to_msecs(jiffies - start_time),
		   0, ((u64)sc->cmnd[0] << 32 |
		   (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		   (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from abort cmd type %x %s\n", task_req,
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;
	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, intr_flags);
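	/*
	 * Checking FNIC_FLAGS_IO_BLOCKED and bumping in_flight under the
	 * same host_lock hold keeps the two consistent: a reset path that
	 * blocks IO cannot slip in between the check and the increment.
	 */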
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "queue_dr_io_req failure - no descriptors\n");
		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
		ret = -EAGAIN;
		goto lr_io_req_end;
	}
	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	atomic_dec(&fnic->in_flight);

	return ret;
}
/*
 * Clean up any pending aborts on the lun.
 * For each outstanding IO on this lun, whose abort is not completed by fw,
 * issue a local abort. Wait for abort to complete. Return 0 if all commands
 * are successfully aborted, 1 otherwise.
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc,
				     bool new_sc)
{
	int tag, abt_tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct scsi_device *lun_dev = lr_sc->device;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;
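	/*
	 * Note: the single on-stack completion tm_done is reused for every
	 * tag scanned below; each iteration points io_req->abts_done at it
	 * and waits with a timeout. The code relies on re-checking
	 * CMD_ABTS_STATUS per command rather than re-initializing the
	 * completion between iterations.
	 */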
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd if issued using new SC
		 * or cmds that do not belong to this lun
		 */
		if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
		    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "%s dev rst not pending sc 0x%p\n",
				      __func__, sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if (io_req->abts_done)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "%s: io_req->abts_done is set state is %s\n",
				     __func__,
				     fnic_ioreq_state_to_str(CMD_STATE(sc)));
		old_ioreq_state = CMD_STATE(sc);
		/*
		 * Any pending IO issued prior to reset is expected to be
		 * in abts pending state, if not we need to set
		 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
		 * When IO is completed, the IO will be handed over and
		 * handled in this function.
		 */
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;

		BUG_ON(io_req->abts_done);

		abt_tag = tag;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			abt_tag |= FNIC_TAG_DEV_RST;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "%s: dev rst sc 0x%p\n", __func__, sc);
		}

		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		io_req->abts_done = &tm_done;
		spin_unlock_irqrestore(io_lock, flags);
		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			spin_lock_irqsave(io_lock, flags);
			io_req = (struct fnic_io_req *)CMD_SP(sc);
			if (io_req)
				io_req->abts_done = NULL;
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
		}
		CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;

		wait_for_completion_timeout(&tm_done,
					    msecs_to_jiffies
					    (fnic->config.ed_tov));
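		/*
		 * A single ED_TOV is presumably enough here because this is
		 * a local terminate (FCPIO_ITMF_ABT_TASK_TERM) handled in
		 * firmware, not a full ABTS round trip to the remote port.
		 */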
		/* Recheck cmd state to check if it is now aborted */
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
			continue;
		}

		io_req->abts_done = NULL;

		/* if abort is still pending with fw, fail */
		if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
			ret = 1;
			goto clean_pending_aborts_end;
		}
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

		/* original sc used for lr is handled by dev reset code */
		if (sc != lr_sc)
			CMD_SP(sc) = NULL;
		spin_unlock_irqrestore(io_lock, flags);

		/* original sc used for lr is handled by dev reset code */
		if (sc != lr_sc) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}

		/*
		 * Any IO returned during reset needs to call scsi_done
		 * to return the scsi_cmnd to the upper layer.
		 */
		if (sc->scsi_done) {
			/* Set result to let upper SCSI layer retry */
			sc->result = DID_RESET << 16;
			sc->scsi_done(sc);
		}
	}
	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
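	/*
	 * Caveat: schedule_timeout() only sleeps if the task state was set
	 * to a sleeping state beforehand; called in TASK_RUNNING state as
	 * above, it returns without delaying. An unconditional delay would
	 * need set_current_state(TASK_UNINTERRUPTIBLE) first, or msleep().
	 */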
	/* walk again to check if IOs are still pending in fw */
	if (fnic_is_abts_pending(fnic, lr_sc))
		ret = 1;

clean_pending_aborts_end:
	return ret;
}
/**
 * fnic_scsi_host_start_tag
 * Allocates a tag ID from the host's tag list.
 **/
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct request_queue *q = sc->request->q;
	struct request *dummy;

	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(dummy))
		return SCSI_NO_TAG;

	sc->tag = sc->request->tag = dummy->tag;
	sc->request->special = dummy;

	return dummy->tag;
}
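/*
 * The dummy request above exists only to pin a block-layer tag for a
 * command that arrived without one; it is never issued to hardware, and
 * it must be released through fnic_scsi_host_end_tag() below, which
 * retrieves it from request->special.
 */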
/**
 * fnic_scsi_host_end_tag
 * Frees the tag allocated by fnic_scsi_host_start_tag.
 **/
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct request *dummy = sc->request->special;

	blk_mq_free_request(dummy);
}
/*
 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
 * fail to get aborted. It calls driver's eh_device_reset with a SCSI
 * command on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	int status;
	int ret = FAILED;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct reset_stats *reset_stats;
	int tag = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int tag_gen_flag = 0;	/* to track tags allocated by fnic driver */
	bool new_sc = false;
	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	reset_stats = &fnic->fnic_stats.reset_stats;

	atomic64_inc(&reset_stats->device_resets);

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
		      rport->port_id, sc->device->lun, sc);

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		goto fnic_device_reset_end;

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport)) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		goto fnic_device_reset_end;
	}
	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;

	/* Allocate tag if not present */
	tag = sc->request->tag;
	if (unlikely(tag < 0)) {
		/*
		 * Really should fix the midlayer to pass in a proper
		 * request for ioctls...
		 */
		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			goto fnic_device_reset_end;
		tag_gen_flag = 1;
		new_sc = true;
	}
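	/*
	 * Resets that arrive without a block-layer tag (e.g. issued through
	 * an ioctl rather than as a queued command) get a dummy tag pinned
	 * above; tag_gen_flag remembers to release it on the way out.
	 */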
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/*
	 * If there is a io_req attached to this command, then use it,
	 * else allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		CMD_SP(sc) = (char *)io_req;
	}
	io_req->dr_done = &tm_done;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(io_lock, flags);
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);

	/*
	 * issue the device reset, if enqueue failed, clean up the ioreq
	 * and break assoc with scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);
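	/*
	 * dr_done is presumably completed from the driver's ITMF completion
	 * handler once firmware acknowledges the LUN reset, with the
	 * resulting FCPIO status recorded in CMD_LR_STATUS.
	 */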
	/*
	 * Wait on the local completion for LUN reset. The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;
	status = CMD_LR_STATUS(sc);

	/*
	 * If lun reset not completed, bail out with failed. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		atomic64_inc(&reset_stats->device_reset_timeouts);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset timed out\n");
		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
		spin_unlock_irqrestore(io_lock, flags);
		int_to_scsilun(sc->device->lun, &fc_lun);
		/*
		 * Issue abort and terminate on device reset request.
		 * If q'ing of terminate fails, retry it after a delay.
		 */
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
				spin_unlock_irqrestore(io_lock, flags);
				break;
			}
			spin_unlock_irqrestore(io_lock, flags);
			if (fnic_queue_abort_io_req(fnic,
				tag | FNIC_TAG_DEV_RST,
				FCPIO_ITMF_ABT_TASK_TERM,
				fc_lun.scsi_lun, io_req)) {
				wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
			} else {
				spin_lock_irqsave(io_lock, flags);
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
				CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
				io_req->abts_done = &tm_done;
				spin_unlock_irqrestore(io_lock, flags);
				FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
					      "Abort and terminate issued on Device reset "
					      "tag 0x%x sc 0x%p\n", tag, sc);
				break;
			}
		}
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
				spin_unlock_irqrestore(io_lock, flags);
				wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
				break;
			} else {
				io_req = (struct fnic_io_req *)CMD_SP(sc);
				io_req->abts_done = NULL;
				goto fnic_device_reset_clean;
			}
		}
	} else {
		spin_unlock_irqrestore(io_lock, flags);
	}
	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Device reset completed - failed\n");
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		goto fnic_device_reset_clean;
	}
	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds.
	 */
	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset failed"
			      " since could not abort all IOs\n");
		goto fnic_device_reset_clean;
	}
	/* Clean lun reset command */
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	if (io_req)
		CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	if (io_req) {
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}
fnic_device_reset_end:
	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
		   sc->request->tag, sc,
		   jiffies_to_msecs(jiffies - start_time),
		   0, ((u64)sc->cmnd[0] << 32 |
		   (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		   (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* free tag if it is allocated */
	if (unlikely(tag_gen_flag))
		fnic_scsi_host_end_tag(fnic, sc);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	if (ret == FAILED)
		atomic64_inc(&reset_stats->device_reset_failures);

	return ret;
}
/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	int ret = 0;
	struct reset_stats *reset_stats;

	lp = shost_priv(shost);
	fnic = lport_priv(lp);
	reset_stats = &fnic->fnic_stats.reset_stats;

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_reset called\n");

	atomic64_inc(&reset_stats->fnic_resets);

	/*
	 * Reset local port, this will clean up libFC exchanges,
	 * reset remote port sessions, and if link is up, begin flogi
	 */
	ret = fc_lport_reset(lp);
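	/*
	 * fc_lport_reset() drives the libFC local-port state machine
	 * through a reset; as part of that, libFC resets its exchange
	 * managers, which presumably lands back in fnic_exch_mgr_reset()
	 * below to flush the driver's outstanding IOs.
	 */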
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from fnic reset %s\n",
		      (ret == 0) ?
		      "SUCCESS" : "FAILED");

	if (ret == 0)
		atomic64_inc(&reset_stats->fnic_reset_completions);
	else
		atomic64_inc(&reset_stats->fnic_reset_failures);

	return ret;
}
/*
 * SCSI Error handling calls driver's eh_host_reset if all prior
 * error handling levels return FAILED. If host reset completes
 * successfully, and if link is up, then Fabric login begins.
 *
 * Host Reset is the highest level of error recovery. If this fails, then
 * host is offlined by SCSI.
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
	int ret;
	unsigned long wait_host_tmo;
	struct Scsi_Host *shost = sc->device->host;
	struct fc_lport *lp = shost_priv(shost);
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->internal_reset_inprogress == 0) {
		fnic->internal_reset_inprogress = 1;
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "host reset in progress skipping another host reset\n");
		return SUCCESS;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	/*
	 * If fnic_reset is successful, wait for fabric login to complete.
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, fabric should be up.
	 */
	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
	if (ret == SUCCESS) {
		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		while (time_before(jiffies, wait_host_tmo)) {
			if ((lp->state == LPORT_ST_READY) &&
			    (lp->link_up)) {
				ret = SUCCESS;
				break;
			}
			ssleep(1);
		}
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->internal_reset_inprogress = 0;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	return ret;
}
/*
 * This fxn is called from libFC when host is removed
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
	int err = 0;
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);
	DECLARE_COMPLETION_ONSTACK(remove_wait);

	/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}
	fnic->remove_wait = &remove_wait;
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	err = fnic_fw_reset_handler(fnic);
	if (err) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		fnic->remove_wait = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	/* Wait for firmware reset to complete */
	wait_for_completion_timeout(&remove_wait,
				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
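	/*
	 * remove_wait is presumably completed by the firmware-reset
	 * completion handler when the reset ack arrives; the timeout
	 * bounds the wait in case the ack never shows up.
	 */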
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = NULL;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_scsi_abort_io %s\n",
		      (fnic->state == FNIC_IN_ETH_MODE) ?
		      "SUCCESS" : "FAILED");
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/*
 * This fxn is called from libFC to clean up driver IO state on link down
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);

	/* issue fw reset */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic_fw_reset_handler(fnic)) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}
}
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}

void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
	struct fnic *fnic = lport_priv(lp);

	/* Non-zero sid, nothing to do */
	if (sid)
		goto call_fc_exch_mgr_reset;

	if (did) {
		fnic_rport_exch_reset(fnic, did);
		goto call_fc_exch_mgr_reset;
	}

	/*
	 * sid = 0, did = 0
	 * link down or device being removed
	 */
	if (!fnic->in_remove)
		fnic_scsi_cleanup(lp);
	else
		fnic_scsi_abort_io(lp);

	/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
	fc_exch_mgr_reset(lp, sid, did);
}
/*
 * fnic_is_abts_pending() is a helper function that
 * walks through the tag map to check if any IOs are pending. If there is
 * one, it returns 1 (true), otherwise 0 (false).
 * If @lr_sc is non-NULL, only IOs on that particular LUN are checked;
 * otherwise, all IOs are checked.
 */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
	int tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_device *lun_dev = NULL;

	if (lr_sc)
		lun_dev = lr_sc->device;
	/* walk again to check if IOs are still pending in fw */
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			ret = 1;
		spin_unlock_irqrestore(io_lock, flags);
	}

	return ret;
}