2 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
4 * based on qla2x00t.c code:
6 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
7 * Copyright (C) 2004 - 2005 Leonid Stoljar
8 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
9 * Copyright (C) 2006 - 2010 ID7 Ltd.
11 * Forward port and refactoring to modern qla2xxx and target/configfs
13 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation, version 2
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/blkdev.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/delay.h>
33 #include <linux/list.h>
34 #include <linux/workqueue.h>
35 #include <asm/unaligned.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <target/target_core_base.h>
40 #include <target/target_core_fabric.h>
43 #include "qla_target.h"
45 static int ql2xtgt_tape_enable;
46 module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
47 MODULE_PARM_DESC(ql2xtgt_tape_enable,
48 "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
50 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
51 module_param(qlini_mode, charp, S_IRUGO);
52 MODULE_PARM_DESC(qlini_mode,
53 "Determines when initiator mode will be enabled. Possible values: "
54 "\"exclusive\" - initiator mode will be enabled on load, "
55 "disabled on enabling target mode and then on disabling target mode "
57 "\"disabled\" - initiator mode will never be enabled; "
58 "\"enabled\" (default) - initiator mode will always stay enabled.");
60 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
62 static int temp_sam_status = SAM_STAT_BUSY;
65 * From scsi/fc/fc_fcp.h
67 enum fcp_resp_rsp_codes {
68 FCP_TMF_CMPL = 0,
69 FCP_DATA_LEN_INVALID = 1,
70 FCP_CMND_FIELDS_INVALID = 2,
71 FCP_DATA_PARAM_MISMATCH = 3,
72 FCP_TMF_REJECTED = 4,
73 FCP_TMF_FAILED = 5,
74 FCP_TMF_INVALID_LUN = 9,
78 * fc_pri_ta from scsi/fc/fc_fcp.h
80 #define FCP_PTA_SIMPLE 0 /* simple task attribute */
81 #define FCP_PTA_HEADQ 1 /* head of queue task attribute */
82 #define FCP_PTA_ORDERED 2 /* ordered task attribute */
83 #define FCP_PTA_ACA 4 /* auto. contingent allegiance */
84 #define FCP_PTA_MASK 7 /* mask for task attribute field */
85 #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
86 #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
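/*
 * Illustrative sketch (added, not part of the original driver): unpacking the
 * task attribute and priority fields defined above from the FCP_CMND
 * priority/task-attribute byte. The helper name and its parameters are
 * hypothetical.
 */
static inline void qlt_example_decode_pri_ta(uint8_t pri_ta,
	uint8_t *task_attr, uint8_t *priority)
{
	*task_attr = pri_ta & FCP_PTA_MASK;	/* e.g. FCP_PTA_SIMPLE */
	*priority = (pri_ta & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
}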
89 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
90 * must be called under HW lock and could unlock/lock it inside.
91 * It isn't an issue, since in the current implementation, at the time when
92 * those functions are called:
94 * - Either context is IRQ and only IRQ handler can modify HW data,
95 * including rings related fields,
97 * - Or access to target mode variables from struct qla_tgt doesn't
98 * cross those functions' boundaries, except tgt_stop, which is
99 * additionally protected by irq_cmd_count.
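/*
 * Illustrative sketch (added): the typical shape of the callers described
 * above, with hardware_lock held on entry; the surrounding variable names are
 * assumed. The helpers may transiently drop and re-take the lock internally.
 */
#if 0
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)	/* may unlock/relock */
		goto out_unlock;
	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	/* ... fill in and post the IOCB ... */
out_unlock:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
#endif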
101 /* Predefs for callbacks handed to qla2xxx LLD */
102 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
103 struct atio_from_isp *pkt, uint8_t);
104 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
105 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
106 int fn, void *iocb, int flags);
107 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
108 *cmd, struct atio_from_isp *atio, int ha_locked);
109 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
110 struct qla_tgt_srr_imm *imm, int ha_lock);
111 static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
112 struct qla_tgt_cmd *cmd);
113 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull);
115 static void qlt_disable_vha(struct scsi_qla_host *vha);
116 static void qlt_clear_tgt_db(struct qla_tgt *tgt);
117 static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *ntfy,
119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
121 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
122 struct imm_ntfy_from_isp *imm, int ha_locked);
126 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
127 static struct kmem_cache *qla_tgt_plogi_cachep;
128 static mempool_t *qla_tgt_mgmt_cmd_mempool;
129 static struct workqueue_struct *qla_tgt_wq;
130 static DEFINE_MUTEX(qla_tgt_mutex);
131 static LIST_HEAD(qla_tgt_glist);
133 /* This API intentionally takes dest as a parameter, rather than returning
134 * an int value, to avoid the caller forgetting to issue wmb() after the store */
135 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
137 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
138 *dest = atomic_inc_return(&base_vha->generation_tick);
139 /* memory barrier */
140 wmb();
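/*
 * Note (added): callers snapshot this generation counter when they queue
 * deferred work; consumers such as qlt_fc_port_deleted() below compare the
 * snapshot (max_gen) against sess->generation and discard stale deletion
 * requests.
 */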
143 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
144 static struct qla_tgt_sess *qlt_find_sess_by_port_name(
146 const uint8_t *port_name)
148 struct qla_tgt_sess *sess;
150 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
151 if (!memcmp(sess->port_name, port_name, WWN_SIZE))
158 /* Might release hw lock, then reacquire!! */
159 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
161 /* Send marker if required */
162 if (unlikely(vha->marker_needed != 0)) {
163 int rc = qla2x00_issue_marker(vha, vha_locked);
164 if (rc != QLA_SUCCESS) {
165 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
166 "qla_target(%d): issue_marker() failed\n",
175 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
178 struct qla_hw_data *ha = vha->hw;
181 if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
184 if (vha->d_id.b.al_pa == d_id[2])
187 BUG_ON(ha->tgt.tgt_vp_map == NULL);
188 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
189 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
190 return ha->tgt.tgt_vp_map[vp_idx].vha;
196 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
199 struct qla_hw_data *ha = vha->hw;
201 if (vha->vp_idx == vp_idx)
204 BUG_ON(ha->tgt.tgt_vp_map == NULL);
205 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
206 return ha->tgt.tgt_vp_map[vp_idx].vha;
211 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
215 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
217 vha->hw->tgt.num_pend_cmds++;
218 if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
219 vha->hw->qla_stats.stat_max_pend_cmds =
220 vha->hw->tgt.num_pend_cmds;
221 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
223 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
227 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
228 vha->hw->tgt.num_pend_cmds--;
229 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
232 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
233 struct atio_from_isp *atio, uint8_t ha_locked)
235 ql_dbg(ql_dbg_tgt, vha, 0xe072,
236 "%s: qla_target(%d): type %x ox_id %04x\n",
237 __func__, vha->vp_idx, atio->u.raw.entry_type,
238 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
240 switch (atio->u.raw.entry_type) {
243 struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
244 atio->u.isp24.fcp_hdr.d_id);
245 if (unlikely(NULL == host)) {
246 ql_dbg(ql_dbg_tgt, vha, 0xe03e,
247 "qla_target(%d): Received ATIO_TYPE7 "
248 "with unknown d_id %x:%x:%x\n", vha->vp_idx,
249 atio->u.isp24.fcp_hdr.d_id[0],
250 atio->u.isp24.fcp_hdr.d_id[1],
251 atio->u.isp24.fcp_hdr.d_id[2]);
254 qlt_24xx_atio_pkt(host, atio, ha_locked);
258 case IMMED_NOTIFY_TYPE:
260 struct scsi_qla_host *host = vha;
261 struct imm_ntfy_from_isp *entry =
262 (struct imm_ntfy_from_isp *)atio;
264 if ((entry->u.isp24.vp_index != 0xFF) &&
265 (entry->u.isp24.nport_handle != 0xFFFF)) {
266 host = qlt_find_host_by_vp_idx(vha,
267 entry->u.isp24.vp_index);
268 if (unlikely(!host)) {
269 ql_dbg(ql_dbg_tgt, vha, 0xe03f,
270 "qla_target(%d): Received "
271 "ATIO (IMMED_NOTIFY_TYPE) "
272 "with unknown vp_index %d\n",
273 vha->vp_idx, entry->u.isp24.vp_index);
277 qlt_24xx_atio_pkt(host, atio, ha_locked);
282 ql_dbg(ql_dbg_tgt, vha, 0xe040,
283 "qla_target(%d): Received unknown ATIO atio "
284 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
291 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
293 switch (pkt->entry_type) {
295 ql_dbg(ql_dbg_tgt, vha, 0xe073,
296 "qla_target(%d):%s: CRC2 Response pkt\n",
297 vha->vp_idx, __func__);
300 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
301 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
303 if (unlikely(!host)) {
304 ql_dbg(ql_dbg_tgt, vha, 0xe041,
305 "qla_target(%d): Response pkt (CTIO_TYPE7) "
306 "received, with unknown vp_index %d\n",
307 vha->vp_idx, entry->vp_index);
310 qlt_response_pkt(host, pkt);
314 case IMMED_NOTIFY_TYPE:
316 struct scsi_qla_host *host = vha;
317 struct imm_ntfy_from_isp *entry =
318 (struct imm_ntfy_from_isp *)pkt;
320 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
321 if (unlikely(!host)) {
322 ql_dbg(ql_dbg_tgt, vha, 0xe042,
323 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
324 "received, with unknown vp_index %d\n",
325 vha->vp_idx, entry->u.isp24.vp_index);
328 qlt_response_pkt(host, pkt);
332 case NOTIFY_ACK_TYPE:
334 struct scsi_qla_host *host = vha;
335 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
337 if (0xFF != entry->u.isp24.vp_index) {
338 host = qlt_find_host_by_vp_idx(vha,
339 entry->u.isp24.vp_index);
340 if (unlikely(!host)) {
341 ql_dbg(ql_dbg_tgt, vha, 0xe043,
342 "qla_target(%d): Response "
343 "pkt (NOTIFY_ACK_TYPE) "
344 "received, with unknown "
345 "vp_index %d\n", vha->vp_idx,
346 entry->u.isp24.vp_index);
350 qlt_response_pkt(host, pkt);
356 struct abts_recv_from_24xx *entry =
357 (struct abts_recv_from_24xx *)pkt;
358 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
360 if (unlikely(!host)) {
361 ql_dbg(ql_dbg_tgt, vha, 0xe044,
362 "qla_target(%d): Response pkt "
363 "(ABTS_RECV_24XX) received, with unknown "
364 "vp_index %d\n", vha->vp_idx, entry->vp_index);
367 qlt_response_pkt(host, pkt);
373 struct abts_resp_to_24xx *entry =
374 (struct abts_resp_to_24xx *)pkt;
375 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
377 if (unlikely(!host)) {
378 ql_dbg(ql_dbg_tgt, vha, 0xe045,
379 "qla_target(%d): Response pkt "
380 "(ABTS_RECV_24XX) received, with unknown "
381 "vp_index %d\n", vha->vp_idx, entry->vp_index);
384 qlt_response_pkt(host, pkt);
389 qlt_response_pkt(vha, pkt);
396 * All qlt_plogi_ack_t operations are protected by hardware_lock
400 * This is a zero-base ref-counting solution, since hardware_lock
401 * guarantees that ref_count is not modified concurrently.
402 * Upon successful return, the content of iocb is undefined
404 static qlt_plogi_ack_t *
405 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
406 struct imm_ntfy_from_isp *iocb)
408 qlt_plogi_ack_t *pla;
410 list_for_each_entry(pla, &vha->plogi_ack_list, list) {
411 if (pla->id.b24 == id->b24) {
412 qlt_send_term_imm_notif(vha, &pla->iocb, 1);
418 pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
420 ql_dbg(ql_dbg_async, vha, 0x5088,
421 "qla_target(%d): Allocation of plogi_ack failed\n",
428 list_add_tail(&pla->list, &vha->plogi_ack_list);
433 static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
435 BUG_ON(!pla->ref_count);
441 ql_dbg(ql_dbg_async, vha, 0x5089,
442 "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
443 " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
444 pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
445 pla->iocb.u.isp24.port_id[0],
446 le16_to_cpu(pla->iocb.u.isp24.nport_handle),
447 pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
448 qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);
450 list_del(&pla->list);
451 kmem_cache_free(qla_tgt_plogi_cachep, pla);
455 qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
456 struct qla_tgt_sess *sess, qlt_plogi_link_t link)
458 /* Inc ref_count first because link might already be pointing at pla */
461 if (sess->plogi_link[link])
462 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
464 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
465 "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
466 " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
467 pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
468 pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
471 sess->plogi_link[link] = pla;
475 /* These fields must be initialized by the caller */
478 * number of cmds dropped while we were waiting for
479 * the initiator to ack LOGO; initialize to 1 if the LOGO is
480 * triggered by a command, otherwise to 0
484 /* These fields are used by callee */
485 struct list_head list;
489 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
491 qlt_port_logo_t *tmp;
494 mutex_lock(&vha->vha_tgt.tgt_mutex);
496 list_for_each_entry(tmp, &vha->logo_list, list) {
497 if (tmp->id.b24 == logo->id.b24) {
498 tmp->cmd_count += logo->cmd_count;
499 mutex_unlock(&vha->vha_tgt.tgt_mutex);
504 list_add_tail(&logo->list, &vha->logo_list);
506 mutex_unlock(&vha->vha_tgt.tgt_mutex);
508 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
510 mutex_lock(&vha->vha_tgt.tgt_mutex);
511 list_del(&logo->list);
512 mutex_unlock(&vha->vha_tgt.tgt_mutex);
514 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
515 "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
516 logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
517 logo->cmd_count, res);
520 static void qlt_free_session_done(struct work_struct *work)
522 struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
524 struct qla_tgt *tgt = sess->tgt;
525 struct scsi_qla_host *vha = sess->vha;
526 struct qla_hw_data *ha = vha->hw;
528 bool logout_started = false;
531 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
532 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
533 " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
534 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
535 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
536 sess->logout_on_delete, sess->keep_nport_handle,
537 sess->send_els_logo);
541 if (sess->send_els_logo) {
542 qlt_port_logo_t logo;
543 logo.id = sess->s_id;
545 qlt_send_first_logo(vha, &logo);
548 if (sess->logout_on_delete) {
551 memset(&fcport, 0, sizeof(fcport));
552 fcport.loop_id = sess->loop_id;
553 fcport.d_id = sess->s_id;
554 memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
556 fcport.tgt_session = sess;
558 rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
559 if (rc != QLA_SUCCESS)
560 ql_log(ql_log_warn, vha, 0xf085,
561 "Schedule logo failed sess %p rc %d\n",
564 logout_started = true;
568 * Release the target session for FC Nexus from fabric module code.
570 if (sess->se_sess != NULL)
571 ha->tgt.tgt_ops->free_session(sess);
573 if (logout_started) {
576 while (!ACCESS_ONCE(sess->logout_completed)) {
578 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
579 "%s: waiting for sess %p logout\n",
586 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
587 "%s: sess %p logout completed\n",
591 spin_lock_irqsave(&ha->hardware_lock, flags);
594 qlt_plogi_ack_t *own =
595 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
596 qlt_plogi_ack_t *con =
597 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
600 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
601 "se_sess %p / sess %p port %8phC is gone,"
602 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
603 sess->se_sess, sess, sess->port_name,
604 own ? "releasing own PLOGI" :
605 "no own PLOGI pending",
606 own ? own->ref_count : -1,
607 con->iocb.u.isp24.port_name, con->ref_count);
608 qlt_plogi_ack_unref(vha, con);
610 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
611 "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
612 sess->se_sess, sess, sess->port_name,
613 own ? "releasing own PLOGI" :
614 "no own PLOGI pending",
615 own ? own->ref_count : -1);
619 qlt_plogi_ack_unref(vha, own);
622 list_del(&sess->sess_list_entry);
624 spin_unlock_irqrestore(&ha->hardware_lock, flags);
626 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
627 "Unregistration of sess %p finished\n", sess);
631 * We need to protect against a race where tgt is freed before or
632 * inside wake_up().
635 if (tgt->sess_count == 0)
636 wake_up_all(&tgt->waitQ);
639 /* ha->tgt.sess_lock supposed to be held on entry */
640 void qlt_unreg_sess(struct qla_tgt_sess *sess)
642 struct scsi_qla_host *vha = sess->vha;
644 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
646 if (!list_empty(&sess->del_list_entry))
647 list_del_init(&sess->del_list_entry);
648 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
650 INIT_WORK(&sess->free_work, qlt_free_session_done);
651 schedule_work(&sess->free_work);
653 EXPORT_SYMBOL(qlt_unreg_sess);
656 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
658 struct qla_hw_data *ha = vha->hw;
659 struct qla_tgt_sess *sess = NULL;
660 uint32_t unpacked_lun, lun = 0;
663 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
664 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
667 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
668 if (loop_id == 0xFFFF) {
670 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
671 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
672 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
673 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
674 #if 0 /* FIXME: do we need to choose a session here? */
675 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
676 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
677 typeof(*sess), sess_list_entry);
679 case QLA_TGT_NEXUS_LOSS_SESS:
680 mcmd = QLA_TGT_NEXUS_LOSS;
682 case QLA_TGT_ABORT_ALL_SESS:
683 mcmd = QLA_TGT_ABORT_ALL;
685 case QLA_TGT_NEXUS_LOSS:
686 case QLA_TGT_ABORT_ALL:
689 ql_dbg(ql_dbg_tgt, vha, 0xe046,
690 "qla_target(%d): Not allowed "
691 "command %x in %s", vha->vp_idx,
700 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
701 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
702 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
705 ql_dbg(ql_dbg_tgt, vha, 0xe000,
706 "Using sess for qla_tgt_reset: %p\n", sess);
712 ql_dbg(ql_dbg_tgt, vha, 0xe047,
713 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
714 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
717 lun = a->u.isp24.fcp_cmnd.lun;
718 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
720 return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
721 iocb, QLA24XX_MGMT_SEND_NACK);
724 /* ha->tgt.sess_lock supposed to be held on entry */
725 static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
728 struct qla_tgt *tgt = sess->tgt;
729 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
732 /* Upgrade to unconditional deletion in case it was temporary */
733 if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
734 list_del(&sess->del_list_entry);
739 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
740 "Scheduling sess %p for deletion\n", sess);
744 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
745 list_add(&sess->del_list_entry, &tgt->del_sess_list);
747 sess->deleted = QLA_SESS_DELETION_PENDING;
748 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
751 sess->expires = jiffies + dev_loss_tmo * HZ;
753 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
754 "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
755 " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
756 sess->vha->vp_idx, sess->port_name, sess->loop_id,
757 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
758 dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
762 mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
764 schedule_delayed_work(&tgt->sess_del_work,
765 sess->expires - jiffies);
768 /* ha->tgt.sess_lock supposed to be held on entry */
769 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
771 struct qla_tgt_sess *sess;
773 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
774 qlt_schedule_sess_for_deletion(sess, true);
776 /* At this point tgt could be already dead */
779 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
782 struct qla_hw_data *ha = vha->hw;
783 dma_addr_t gid_list_dma;
784 struct gid_list_info *gid_list;
789 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
790 &gid_list_dma, GFP_KERNEL);
792 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
793 "qla_target(%d): DMA Alloc failed of %u\n",
794 vha->vp_idx, qla2x00_gid_list_size(ha));
798 /* Get list of logged in devices */
799 rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
800 if (rc != QLA_SUCCESS) {
801 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
802 "qla_target(%d): get_id_list() failed: %x\n",
805 goto out_free_id_list;
808 id_iter = (char *)gid_list;
810 for (i = 0; i < entries; i++) {
811 struct gid_list_info *gid = (struct gid_list_info *)id_iter;
812 if ((gid->al_pa == s_id[2]) &&
813 (gid->area == s_id[1]) &&
814 (gid->domain == s_id[0])) {
815 *loop_id = le16_to_cpu(gid->loop_id);
819 id_iter += ha->gid_list_info_size;
823 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
824 gid_list, gid_list_dma);
828 /* ha->tgt.sess_lock supposed to be held on entry */
829 static void qlt_undelete_sess(struct qla_tgt_sess *sess)
831 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
833 list_del_init(&sess->del_list_entry);
837 static void qlt_del_sess_work_fn(struct delayed_work *work)
839 struct qla_tgt *tgt = container_of(work, struct qla_tgt,
841 struct scsi_qla_host *vha = tgt->vha;
842 struct qla_hw_data *ha = vha->hw;
843 struct qla_tgt_sess *sess;
844 unsigned long flags, elapsed;
846 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
847 while (!list_empty(&tgt->del_sess_list)) {
848 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
851 if (time_after_eq(elapsed, sess->expires)) {
852 /* No turning back */
853 list_del_init(&sess->del_list_entry);
854 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
856 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
857 "Timeout: sess %p about to be deleted\n",
859 ha->tgt.tgt_ops->shutdown_sess(sess);
860 ha->tgt.tgt_ops->put_sess(sess);
862 schedule_delayed_work(&tgt->sess_del_work,
863 sess->expires - elapsed);
867 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
871 * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
872 * Caller must put it.
874 static struct qla_tgt_sess *qlt_create_sess(
875 struct scsi_qla_host *vha,
879 struct qla_hw_data *ha = vha->hw;
880 struct qla_tgt_sess *sess;
882 unsigned char be_sid[3];
884 /* Check to avoid double sessions */
885 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
886 list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
888 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
889 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
890 "Double sess %p found (s_id %x:%x:%x, "
891 "loop_id %d), updating to d_id %x:%x:%x, "
892 "loop_id %d", sess, sess->s_id.b.domain,
893 sess->s_id.b.al_pa, sess->s_id.b.area,
894 sess->loop_id, fcport->d_id.b.domain,
895 fcport->d_id.b.al_pa, fcport->d_id.b.area,
898 /* Cannot undelete at this point */
899 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
900 spin_unlock_irqrestore(&ha->tgt.sess_lock,
906 qlt_undelete_sess(sess);
908 kref_get(&sess->se_sess->sess_kref);
909 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
910 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
912 if (sess->local && !local)
915 qlt_do_generation_tick(vha, &sess->generation);
917 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
922 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
924 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
926 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
927 "qla_target(%u): session allocation failed, all commands "
928 "from port %8phC will be refused", vha->vp_idx,
933 sess->tgt = vha->vha_tgt.qla_tgt;
935 sess->s_id = fcport->d_id;
936 sess->loop_id = fcport->loop_id;
938 INIT_LIST_HEAD(&sess->del_list_entry);
940 /* Under normal circumstances we want to log out from the firmware when
941 * the session eventually ends and release the corresponding nport handle.
942 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
943 * code will adjust these flags as necessary. */
944 sess->logout_on_delete = 1;
945 sess->keep_nport_handle = 0;
947 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
948 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
949 sess, vha->vha_tgt.qla_tgt);
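/*
 * Note (added): sess->s_id is kept in the driver's port_id_t layout; be_sid[]
 * below rebuilds the same 24-bit FC address in wire (big-endian) order
 * domain:area:al_pa for the fabric module's s_id-based ACL lookup.
 */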
951 be_sid[0] = sess->s_id.b.domain;
952 be_sid[1] = sess->s_id.b.area;
953 be_sid[2] = sess->s_id.b.al_pa;
955 * Determine if this fc_port->port_name is allowed to access
956 * target mode using explicit NodeACLs+MappedLUNs, or using
957 * TPG demo mode. If this is successful, a target mode FC nexus
958 * is created.
960 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
961 &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
966 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
967 * access across ->tgt.sess_lock reacquire.
969 kref_get(&sess->se_sess->sess_kref);
971 sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
972 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
973 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
975 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
976 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
977 vha->vha_tgt.qla_tgt->sess_count++;
978 qlt_do_generation_tick(vha, &sess->generation);
979 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
981 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
982 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
983 "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
984 vha->vp_idx, local ? "local " : "", fcport->port_name,
985 fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
986 sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
992 * Called from qla2x00_reg_remote_port()
994 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
996 struct qla_hw_data *ha = vha->hw;
997 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
998 struct qla_tgt_sess *sess;
1001 if (!vha->hw->tgt.tgt_ops)
1004 if (!tgt || (fcport->port_type != FCT_INITIATOR))
1007 if (qla_ini_mode_enabled(vha))
1010 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1011 if (tgt->tgt_stop) {
1012 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1015 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
1017 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1019 mutex_lock(&vha->vha_tgt.tgt_mutex);
1020 sess = qlt_create_sess(vha, fcport, false);
1021 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1023 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1024 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1025 /* Point of no return */
1026 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1029 kref_get(&sess->se_sess->sess_kref);
1031 if (sess->deleted) {
1032 qlt_undelete_sess(sess);
1034 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
1035 "qla_target(%u): %ssession for port %8phC "
1036 "(loop ID %d) reappeared\n", vha->vp_idx,
1037 sess->local ? "local " : "", sess->port_name,
1040 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
1041 "Reappeared sess %p\n", sess);
1043 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
1044 (fcport->flags & FCF_CONF_COMP_SUPPORTED));
1047 if (sess && sess->local) {
1048 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
1049 "qla_target(%u): local session for "
1050 "port %8phC (loop ID %d) became global\n", vha->vp_idx,
1051 fcport->port_name, sess->loop_id);
1054 ha->tgt.tgt_ops->put_sess(sess);
1055 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1059 * max_gen - specifies maximum session generation
1060 * at which this deletion request is still valid
1063 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1065 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1066 struct qla_tgt_sess *sess;
1067 unsigned long flags;
1069 if (!vha->hw->tgt.tgt_ops)
1075 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1076 if (tgt->tgt_stop) {
1077 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1080 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
1082 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1086 if (max_gen - sess->generation < 0) {
1087 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1088 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
1089 "Ignoring stale deletion request for se_sess %p / sess %p"
1090 " for port %8phC, req_gen %d, sess_gen %d\n",
1091 sess->se_sess, sess, sess->port_name, max_gen,
1096 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1099 qlt_schedule_sess_for_deletion(sess, false);
1100 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1103 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
1105 struct qla_hw_data *ha = tgt->ha;
1106 unsigned long flags;
1109 * We need to protect against a race where tgt is freed before or
1110 * inside wake_up().
1112 spin_lock_irqsave(&ha->hardware_lock, flags);
1113 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1114 "tgt %p, empty(sess_list)=%d sess_count=%d\n",
1115 tgt, list_empty(&tgt->sess_list), tgt->sess_count);
1116 res = (tgt->sess_count == 0);
1117 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1122 /* Called by tcm_qla2xxx configfs code */
1123 int qlt_stop_phase1(struct qla_tgt *tgt)
1125 struct scsi_qla_host *vha = tgt->vha;
1126 struct qla_hw_data *ha = tgt->ha;
1127 unsigned long flags;
1129 mutex_lock(&qla_tgt_mutex);
1130 if (!vha->fc_vport) {
1131 struct Scsi_Host *sh = vha->host;
1132 struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
1135 spin_lock_irqsave(sh->host_lock, flags);
1136 npiv_vports = (fc_host->npiv_vports_inuse);
1137 spin_unlock_irqrestore(sh->host_lock, flags);
1140 mutex_unlock(&qla_tgt_mutex);
1144 if (tgt->tgt_stop || tgt->tgt_stopped) {
1145 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
1146 "Already in tgt->tgt_stop or tgt_stopped state\n");
1147 mutex_unlock(&qla_tgt_mutex);
1151 ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
1154 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
1155 * Lock is needed, because we can still get an incoming packet.
1157 mutex_lock(&vha->vha_tgt.tgt_mutex);
1158 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1160 qlt_clear_tgt_db(tgt);
1161 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1162 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1163 mutex_unlock(&qla_tgt_mutex);
1165 flush_delayed_work(&tgt->sess_del_work);
1167 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
1168 "Waiting for sess works (tgt %p)", tgt);
1169 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1170 while (!list_empty(&tgt->sess_works_list)) {
1171 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1172 flush_scheduled_work();
1173 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1175 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1177 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
1178 "Waiting for tgt %p: list_empty(sess_list)=%d "
1179 "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
1182 wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
1185 if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
1186 qlt_disable_vha(vha);
1188 /* Wait for sessions to clear out (just in case) */
1189 wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
1192 EXPORT_SYMBOL(qlt_stop_phase1);
1194 /* Called by tcm_qla2xxx configfs code */
1195 void qlt_stop_phase2(struct qla_tgt *tgt)
1197 struct qla_hw_data *ha = tgt->ha;
1198 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1199 unsigned long flags;
1201 if (tgt->tgt_stopped) {
1202 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1203 "Already in tgt->tgt_stopped state\n");
1208 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1209 "Waiting for %d IRQ commands to complete (tgt %p)",
1210 tgt->irq_cmd_count, tgt);
1212 mutex_lock(&vha->vha_tgt.tgt_mutex);
1213 spin_lock_irqsave(&ha->hardware_lock, flags);
1214 while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
1215 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1217 spin_lock_irqsave(&ha->hardware_lock, flags);
1220 tgt->tgt_stopped = 1;
1221 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1222 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1224 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
1227 EXPORT_SYMBOL(qlt_stop_phase2);
1229 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
1230 static void qlt_release(struct qla_tgt *tgt)
1232 scsi_qla_host_t *vha = tgt->vha;
1234 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
1235 qlt_stop_phase2(tgt);
1237 vha->vha_tgt.qla_tgt = NULL;
1239 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1240 "Release of tgt %p finished\n", tgt);
1245 /* ha->hardware_lock supposed to be held on entry */
1246 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1247 const void *param, unsigned int param_size)
1249 struct qla_tgt_sess_work_param *prm;
1250 unsigned long flags;
1252 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1254 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1255 "qla_target(%d): Unable to create session "
1256 "work, command will be refused", 0);
1260 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1261 "Scheduling work (type %d, prm %p)"
1262 " to find session for param %p (size %d, tgt %p)\n",
1263 type, prm, param, param_size, tgt);
1266 memcpy(&prm->tm_iocb, param, param_size);
1268 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1269 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1270 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1272 schedule_work(&tgt->sess_work);
1278 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1280 static void qlt_send_notify_ack(struct scsi_qla_host *vha,
1281 struct imm_ntfy_from_isp *ntfy,
1282 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
1283 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
1285 struct qla_hw_data *ha = vha->hw;
1287 struct nack_to_isp *nack;
1289 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1291 /* Send marker if required */
1292 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1295 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
1297 ql_dbg(ql_dbg_tgt, vha, 0xe049,
1298 "qla_target(%d): %s failed: unable to allocate "
1299 "request packet\n", vha->vp_idx, __func__);
1303 if (vha->vha_tgt.qla_tgt != NULL)
1304 vha->vha_tgt.qla_tgt->notify_ack_expected++;
1306 pkt->entry_type = NOTIFY_ACK_TYPE;
1307 pkt->entry_count = 1;
1309 nack = (struct nack_to_isp *)pkt;
1310 nack->ox_id = ntfy->ox_id;
1312 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
1313 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
1314 nack->u.isp24.flags = ntfy->u.isp24.flags &
1315 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
1317 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
1318 nack->u.isp24.status = ntfy->u.isp24.status;
1319 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
1320 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
1321 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
1322 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
1323 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
1324 nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
1325 nack->u.isp24.srr_reject_code = srr_reject_code;
1326 nack->u.isp24.srr_reject_code_expl = srr_explan;
1327 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
1329 ql_dbg(ql_dbg_tgt, vha, 0xe005,
1330 "qla_target(%d): Sending 24xx Notify Ack %d\n",
1331 vha->vp_idx, nack->u.isp24.status);
1333 /* Memory Barrier */
1334 wmb();
1335 qla2x00_start_iocbs(vha, vha->req);
1339 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1341 static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
1342 struct abts_recv_from_24xx *abts, uint32_t status,
1345 struct qla_hw_data *ha = vha->hw;
1346 struct abts_resp_to_24xx *resp;
1350 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1351 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1354 /* Send marker if required */
1355 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1358 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
1360 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1361 "qla_target(%d): %s failed: unable to allocate "
1362 "request packet", vha->vp_idx, __func__);
1366 resp->entry_type = ABTS_RESP_24XX;
1367 resp->entry_count = 1;
1368 resp->nport_handle = abts->nport_handle;
1369 resp->vp_index = vha->vp_idx;
1370 resp->sof_type = abts->sof_type;
1371 resp->exchange_address = abts->exchange_address;
1372 resp->fcp_hdr_le = abts->fcp_hdr_le;
1373 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1374 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1375 F_CTL_SEQ_INITIATIVE);
1376 p = (uint8_t *)&f_ctl;
1377 resp->fcp_hdr_le.f_ctl[0] = *p++;
1378 resp->fcp_hdr_le.f_ctl[1] = *p++;
1379 resp->fcp_hdr_le.f_ctl[2] = *p;
1381 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
1382 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
1383 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1384 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1385 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1386 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1388 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1389 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1390 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1391 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1392 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1393 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1395 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1396 if (status == FCP_TMF_CMPL) {
1397 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1398 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1399 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1400 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1401 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1402 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1404 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1405 resp->payload.ba_rjt.reason_code =
1406 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1407 /* Other bytes are zero */
1410 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1412 /* Memory Barrier */
1413 wmb();
1414 qla2x00_start_iocbs(vha, vha->req);
1418 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1420 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1421 struct abts_resp_from_24xx_fw *entry)
1423 struct ctio7_to_24xx *ctio;
1425 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1426 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
1427 /* Send marker if required */
1428 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1431 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
1433 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1434 "qla_target(%d): %s failed: unable to allocate "
1435 "request packet\n", vha->vp_idx, __func__);
1440 * On entry we have the firmware's response to an ABTS response that
1441 * we generated, so its ID fields are reversed.
1444 ctio->entry_type = CTIO_TYPE7;
1445 ctio->entry_count = 1;
1446 ctio->nport_handle = entry->nport_handle;
1447 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1448 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1449 ctio->vp_index = vha->vp_idx;
1450 ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1451 ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1452 ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1453 ctio->exchange_addr = entry->exchange_addr_to_abort;
1454 ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1455 CTIO7_FLAGS_TERMINATE);
1456 ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
1458 /* Memory Barrier */
1459 wmb();
1460 qla2x00_start_iocbs(vha, vha->req);
1462 qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
1463 FCP_TMF_CMPL, true);
1466 static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1468 struct qla_tgt_sess_op *op;
1469 struct qla_tgt_cmd *cmd;
1471 spin_lock(&vha->cmd_list_lock);
1473 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1474 if (tag == op->atio.u.isp24.exchange_addr) {
1476 spin_unlock(&vha->cmd_list_lock);
1481 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1482 if (tag == cmd->atio.u.isp24.exchange_addr) {
1484 spin_unlock(&vha->cmd_list_lock);
1489 spin_unlock(&vha->cmd_list_lock);
1493 /* drop cmds for the given lun
1494 * XXX only looks for cmds on the port through which the lun reset was received
1495 * XXX does not go through the lists of other ports (which may have cmds
1496 * for the same lun)
1498 static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1499 uint32_t lun, uint8_t *s_id)
1501 struct qla_tgt_sess_op *op;
1502 struct qla_tgt_cmd *cmd;
1505 key = sid_to_key(s_id);
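/*
 * Note (added): sid_to_key() (a helper in qla_target.h) packs the three S_ID
 * bytes into a single 24-bit key, so the scans below reduce to one integer
 * compare plus a LUN compare per command.
 */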
1506 spin_lock(&vha->cmd_list_lock);
1507 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1511 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1512 op_lun = scsilun_to_int(
1513 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1514 if (op_key == key && op_lun == lun)
1517 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1521 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1522 cmd_lun = scsilun_to_int(
1523 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1524 if (cmd_key == key && cmd_lun == lun)
1527 spin_unlock(&vha->cmd_list_lock);
1530 /* ha->hardware_lock supposed to be held on entry */
1531 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1532 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
1534 struct qla_hw_data *ha = vha->hw;
1535 struct se_session *se_sess = sess->se_sess;
1536 struct qla_tgt_mgmt_cmd *mcmd;
1537 struct se_cmd *se_cmd;
1540 bool found_lun = false;
1542 spin_lock(&se_sess->sess_cmd_lock);
1543 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1544 struct qla_tgt_cmd *cmd =
1545 container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
1546 if (se_cmd->tag == abts->exchange_addr_to_abort) {
1547 lun = cmd->unpacked_lun;
1552 spin_unlock(&se_sess->sess_cmd_lock);
1554 /* cmd not in LIO lists, look in qla list */
1556 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1557 /* send TASK_ABORT response immediately */
1558 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
1561 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1562 "unable to find cmd in driver or LIO for tag 0x%x\n",
1563 abts->exchange_addr_to_abort);
1568 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1569 "qla_target(%d): task abort (tag=%d)\n",
1570 vha->vp_idx, abts->exchange_addr_to_abort);
1572 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
1574 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
1575 "qla_target(%d): %s: Allocation of ABORT cmd failed",
1576 vha->vp_idx, __func__);
1579 memset(mcmd, 0, sizeof(*mcmd));
1582 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1583 mcmd->reset_count = vha->hw->chip_reset;
1585 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
1586 abts->exchange_addr_to_abort);
1588 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
1589 "qla_target(%d): tgt_ops->handle_tmr()"
1590 " failed: %d", vha->vp_idx, rc);
1591 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1599 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1601 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1602 struct abts_recv_from_24xx *abts)
1604 struct qla_hw_data *ha = vha->hw;
1605 struct qla_tgt_sess *sess;
1606 uint32_t tag = abts->exchange_addr_to_abort;
1609 unsigned long flags;
1611 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1612 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
1613 "qla_target(%d): ABTS: Abort Sequence not "
1614 "supported\n", vha->vp_idx);
1615 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1619 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
1620 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
1621 "qla_target(%d): ABTS: Unknown Exchange "
1622 "Address received\n", vha->vp_idx);
1623 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1627 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
1628 "qla_target(%d): task abort (s_id=%x:%x:%x, "
1629 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
1630 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
1631 le32_to_cpu(abts->fcp_hdr_le.parameter));
1633 s_id[0] = abts->fcp_hdr_le.s_id[2];
1634 s_id[1] = abts->fcp_hdr_le.s_id[1];
1635 s_id[2] = abts->fcp_hdr_le.s_id[0];
1637 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1638 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
1640 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
1641 "qla_target(%d): task abort for non-existant session\n",
1643 rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
1644 QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
1646 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1649 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
1654 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1657 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1658 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1662 rc = __qlt_24xx_handle_abts(vha, abts, sess);
1664 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
1665 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
1667 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1673 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1675 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1676 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
1678 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
1679 struct ctio7_to_24xx *ctio;
1682 ql_dbg(ql_dbg_tgt, ha, 0xe008,
1683 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1684 ha, atio, resp_code);
1686 /* Send marker if required */
1687 if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
1690 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
1692 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
1693 "qla_target(%d): %s failed: unable to allocate "
1694 "request packet\n", ha->vp_idx, __func__);
1698 ctio->entry_type = CTIO_TYPE7;
1699 ctio->entry_count = 1;
1700 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1701 ctio->nport_handle = mcmd->sess->loop_id;
1702 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1703 ctio->vp_index = ha->vp_idx;
1704 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1705 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1706 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1707 ctio->exchange_addr = atio->u.isp24.exchange_addr;
1708 ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
1709 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
1710 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1711 ctio->u.status1.ox_id = cpu_to_le16(temp);
1712 ctio->u.status1.scsi_status =
1713 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1714 ctio->u.status1.response_len = cpu_to_le16(8);
1715 ctio->u.status1.sense_data[0] = resp_code;
1717 /* Memory Barrier */
1718 wmb();
1719 qla2x00_start_iocbs(ha, ha->req);
1722 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
1724 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1726 EXPORT_SYMBOL(qlt_free_mcmd);
1728 /* callback from target fabric module code */
1729 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1731 struct scsi_qla_host *vha = mcmd->sess->vha;
1732 struct qla_hw_data *ha = vha->hw;
1733 unsigned long flags;
1735 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
1736 "TM response mcmd (%p) status %#x state %#x",
1737 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
1739 spin_lock_irqsave(&ha->hardware_lock, flags);
1741 if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
1743 * Either the port is not online or this request was from
1744 * previous life, just abort the processing.
1746 ql_dbg(ql_dbg_async, vha, 0xe100,
1747 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
1748 vha->flags.online, qla2x00_reset_active(vha),
1749 mcmd->reset_count, ha->chip_reset);
1750 ha->tgt.tgt_ops->free_mcmd(mcmd);
1751 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1755 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
1756 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
1759 if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
1760 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
1761 mcmd->fc_tm_rsp, false);
1763 qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
1767 * Make the callback for ->free_mcmd() to queue_work() and invoke
1768 * target_put_sess_cmd() to drop cmd_kref to 1. The final
1769 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
1770 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
1771 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
1772 * qlt_xmit_tm_rsp() returns here..
1774 ha->tgt.tgt_ops->free_mcmd(mcmd);
1775 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1777 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
1780 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
1782 struct qla_tgt_cmd *cmd = prm->cmd;
1784 BUG_ON(cmd->sg_cnt == 0);
1786 prm->sg = (struct scatterlist *)cmd->sg;
1787 prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
1788 cmd->sg_cnt, cmd->dma_data_direction);
1789 if (unlikely(prm->seg_cnt == 0))
1792 prm->cmd->sg_mapped = 1;
1794 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
1796 * If greater than four sg entries then we need to allocate
1797 * the continuation entries
1799 if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
1800 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
1801 prm->tgt->datasegs_per_cmd,
1802 prm->tgt->datasegs_per_cont);
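/*
 * Worked example (added, values illustrative only): with seg_cnt = 12,
 * datasegs_per_cmd = 1 and datasegs_per_cont = 5, this adds
 * DIV_ROUND_UP(12 - 1, 5) = 3 continuation IOCBs to req_cnt on top of the
 * base CTIO.
 */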
1805 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
1806 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
1807 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
1808 prm->tot_dsds = prm->seg_cnt;
1810 prm->tot_dsds = prm->seg_cnt;
1812 if (cmd->prot_sg_cnt) {
1813 prm->prot_sg = cmd->prot_sg;
1814 prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
1815 cmd->prot_sg, cmd->prot_sg_cnt,
1816 cmd->dma_data_direction);
1817 if (unlikely(prm->prot_seg_cnt == 0))
1820 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
1821 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
1822 /* DIF bundling is not supported here */
1823 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
1825 prm->tot_dsds += prm->prot_seg_cnt;
1827 prm->tot_dsds += prm->prot_seg_cnt;
1834 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
1835 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
1836 0, prm->cmd->sg_cnt);
1840 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
1842 struct qla_hw_data *ha = vha->hw;
1844 if (!cmd->sg_mapped)
1847 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1850 if (cmd->prot_sg_cnt)
1851 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
1852 cmd->dma_data_direction);
1854 if (cmd->ctx_dsd_alloced)
1855 qla2x00_clean_dsd_pool(ha, NULL, cmd);
1858 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
1861 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
1864 uint32_t cnt, cnt_in;
1866 if (vha->req->cnt < (req_cnt + 2)) {
1867 cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
1868 cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
1870 if (vha->req->ring_index < cnt)
1871 vha->req->cnt = cnt - vha->req->ring_index;
1873 vha->req->cnt = vha->req->length -
1874 (vha->req->ring_index - cnt);
1877 if (unlikely(vha->req->cnt < (req_cnt + 2))) {
1878 ql_dbg(ql_dbg_io, vha, 0x305a,
1879 "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
1880 vha->vp_idx, vha->req->ring_index,
1881 vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
1884 vha->req->cnt -= req_cnt;
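/*
 * Worked example (added, numbers illustrative only): with a ring of length
 * 2048, ring_index = 1500 and a hardware out pointer of 100, the free count
 * is 2048 - (1500 - 100) = 648 entries; the reservation succeeds only when
 * at least req_cnt + 2 entries are free.
 */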
1890 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1892 static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
1894 /* Adjust ring index. */
1895 vha->req->ring_index++;
1896 if (vha->req->ring_index == vha->req->length) {
1897 vha->req->ring_index = 0;
1898 vha->req->ring_ptr = vha->req->ring;
1900 vha->req->ring_ptr++;
1902 return (cont_entry_t *)vha->req->ring_ptr;
1905 /* ha->hardware_lock supposed to be held on entry */
1906 static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
1908 struct qla_hw_data *ha = vha->hw;
1911 h = ha->tgt.current_handle;
1912 /* always increment cmd handle */
1915 if (h > DEFAULT_OUTSTANDING_COMMANDS)
1916 h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
1917 if (h == ha->tgt.current_handle) {
1918 ql_dbg(ql_dbg_io, vha, 0x305b,
1919 "qla_target(%d): Ran out of "
1920 "empty cmd slots in ha %p\n", vha->vp_idx, ha);
1921 h = QLA_TGT_NULL_HANDLE;
1924 } while ((h == QLA_TGT_NULL_HANDLE) ||
1925 (h == QLA_TGT_SKIP_HANDLE) ||
1926 (ha->tgt.cmds[h-1] != NULL));
1928 if (h != QLA_TGT_NULL_HANDLE)
1929 ha->tgt.current_handle = h;
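/*
 * Illustrative note (added): the CTIO builder stores the command at
 * ha->tgt.cmds[h - 1], so a later completion can recover it roughly as
 * follows (sketch only, error paths omitted):
 *
 *	handle &= ~CTIO_COMPLETION_HANDLE_MARK;
 *	cmd = ha->tgt.cmds[handle - 1];
 *	ha->tgt.cmds[handle - 1] = NULL;
 */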
1934 /* ha->hardware_lock supposed to be held on entry */
1935 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1936 struct scsi_qla_host *vha)
1939 struct ctio7_to_24xx *pkt;
1940 struct qla_hw_data *ha = vha->hw;
1941 struct atio_from_isp *atio = &prm->cmd->atio;
1944 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
1946 memset(pkt, 0, sizeof(*pkt));
1948 pkt->entry_type = CTIO_TYPE7;
1949 pkt->entry_count = (uint8_t)prm->req_cnt;
1950 pkt->vp_index = vha->vp_idx;
1952 h = qlt_make_handle(vha);
1953 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
1955 * CTIO type 7 from the firmware doesn't provide a way to
1956 * know the initiator's LOOP ID, hence we can't find
1957 * the session and, therefore, the command.
1961 ha->tgt.cmds[h-1] = prm->cmd;
1963 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
1964 pkt->nport_handle = prm->cmd->loop_id;
1965 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1966 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1967 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1968 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1969 pkt->exchange_addr = atio->u.isp24.exchange_addr;
1970 pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
1971 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1972 pkt->u.status0.ox_id = cpu_to_le16(temp);
1973 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
1979 * ha->hardware_lock supposed to be held on entry. We have already made sure
1980 * that there is a sufficient number of request entries to not drop it.
1982 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
1983 struct scsi_qla_host *vha)
1986 uint32_t *dword_ptr;
1987 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1989 /* Build continuation packets */
1990 while (prm->seg_cnt > 0) {
1991 cont_a64_entry_t *cont_pkt64 =
1992 (cont_a64_entry_t *)qlt_get_req_pkt(vha);
1995 * Make sure that none of cont_pkt64's 64-bit-specific
1996 * fields are used for 32-bit addressing. Cast to
1997 * (cont_entry_t *) for that.
2001 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2003 cont_pkt64->entry_count = 1;
2004 cont_pkt64->sys_define = 0;
2006 if (enable_64bit_addressing) {
2007 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2009 (uint32_t *)&cont_pkt64->dseg_0_address;
2011 cont_pkt64->entry_type = CONTINUE_TYPE;
2013 (uint32_t *)&((cont_entry_t *)
2014 cont_pkt64)->dseg_0_address;
2017 /* Load continuation entry data segments */
2019 cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
2020 cnt++, prm->seg_cnt--) {
2022 cpu_to_le32(pci_dma_lo32
2023 (sg_dma_address(prm->sg)));
2024 if (enable_64bit_addressing) {
2026 cpu_to_le32(pci_dma_hi32
2030 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2032 prm->sg = sg_next(prm->sg);
2038 * ha->hardware_lock supposed to be held on entry. We have already made sure
2039 * that there is a sufficient number of request entries to not drop it.
2041 static void qlt_load_data_segments(struct qla_tgt_prm *prm,
2042 struct scsi_qla_host *vha)
2045 uint32_t *dword_ptr;
2046 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
2047 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2049 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2051 /* Setup packet address segment pointer */
2052 dword_ptr = pkt24->u.status0.dseg_0_address;
2054 /* Set total data segment count */
2056 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2058 if (prm->seg_cnt == 0) {
2059 /* No data transfer */
2065 /* If scatter gather */
2067 /* Load command entry data segments */
2069 (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
2070 cnt++, prm->seg_cnt--) {
2072 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
2073 if (enable_64bit_addressing) {
2075 cpu_to_le32(pci_dma_hi32(
2076 sg_dma_address(prm->sg)));
2078 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2080 prm->sg = sg_next(prm->sg);
2083 qlt_load_cont_data_segments(prm, vha);
2086 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2088 return cmd->bufflen > 0;
2092 * Called without ha->hardware_lock held
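 *
 * Prepares the qla_tgt_prm before transmit: records the SCSI status and
 * sense buffer, maps the scatterlist and computes the number of request
 * entries needed when data is to be sent, folds residual under/overflow
 * into the SCSI status, and decides whether an extra status-only CTIO
 * will be required.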
2094 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2095 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2096 uint32_t *full_req_cnt)
2098 struct qla_tgt *tgt = cmd->tgt;
2099 struct scsi_qla_host *vha = tgt->vha;
2100 struct qla_hw_data *ha = vha->hw;
2101 struct se_cmd *se_cmd = &cmd->se_cmd;
2105 prm->rq_result = scsi_status;
2106 prm->sense_buffer = &cmd->sense_buffer[0];
2107 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2111 prm->add_status_pkt = 0;
2113 /* Send marker if required */
2114 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2117 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2118 if (qlt_pci_map_calc_cnt(prm) != 0)
2122 *full_req_cnt = prm->req_cnt;
2124 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2125 prm->residual = se_cmd->residual_count;
2126 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
2127 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2128 prm->residual, se_cmd->tag,
2129 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2130 cmd->bufflen, prm->rq_result);
2131 prm->rq_result |= SS_RESIDUAL_UNDER;
2132 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2133 prm->residual = se_cmd->residual_count;
2134 ql_dbg(ql_dbg_io, vha, 0x305d,
2135 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2136 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2137 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2138 prm->rq_result |= SS_RESIDUAL_OVER;
2141 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2143 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2144 * ignored in *xmit_response() below
2146 if (qlt_has_data(cmd)) {
2147 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2148 (IS_FWI2_CAPABLE(ha) &&
2149 (prm->rq_result != 0))) {
2150 prm->add_status_pkt = 1;
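/*
 * Decide whether the initiator should be asked to explicitly confirm
 * delivery (the CTIO7 CONFORM flags): not when class 2 service is enabled,
 * and otherwise only if the initiator advertised confirmed completion
 * support (and, unless sense is being sent, explicit confirmation is
 * enabled for the target).
 */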
2159 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
2160 struct qla_tgt_cmd *cmd, int sending_sense)
2162 if (ha->tgt.enable_class_2)
2166 return cmd->conf_compl_supported;
2168 return ha->tgt.enable_explicit_conf &&
2169 cmd->conf_compl_supported;
2172 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
2174 * Originally taken from the XFS code
2176 static unsigned long qlt_srr_random(void)
2179 static unsigned long RandomValue;
2180 static DEFINE_SPINLOCK(lock);
2181 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
2185 unsigned long flags;
2187 spin_lock_irqsave(&lock, flags);
2189 RandomValue = jiffies;
2195 rv = 16807 * lo - 2836 * hi;
2199 spin_unlock_irqrestore(&lock, flags);
2203 static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
2205 #if 0 /* This is not a real status packets lost, so it won't lead to SRR */
2206 if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
2208 *xmit_type &= ~QLA_TGT_XMIT_STATUS;
2209 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
2210 "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
2214 * It's currently not possible to simulate SRRs for FCP_WRITE without
2215 * a physical link layer failure, so don't even try here..
2217 if (cmd->dma_data_direction != DMA_FROM_DEVICE)
2220 if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
2221 ((qlt_srr_random() % 100) == 20)) {
2223 unsigned int tot_len = 0;
2226 leave = qlt_srr_random() % cmd->sg_cnt;
2228 for (i = 0; i < leave; i++)
2229 tot_len += cmd->sg[i].length;
2231 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
2232 "Cutting cmd %p (tag %d) buffer"
2233 " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
2234 " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
2235 cmd->bufflen, cmd->sg_cnt);
2237 cmd->bufflen = tot_len;
2238 cmd->sg_cnt = leave;
2241 if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
2242 unsigned int offset = qlt_srr_random() % cmd->bufflen;
2244 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
2245 "Cutting cmd %p (tag %d) buffer head "
2246 "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
2249 *xmit_type &= ~QLA_TGT_XMIT_DATA;
2250 else if (qlt_set_data_offset(cmd, offset)) {
2251 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
2252 "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
2257 static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
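/*
 * Fill in the status/sense portion of a CTIO7: set SEND_STATUS, residual
 * and SCSI status and, when valid sense data is present, switch the IOCB
 * to status mode 1 and copy the sense bytes (as big-endian 32-bit words)
 * into the entry, truncating to the space available in the IOCB.
 */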
2261 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2262 struct qla_tgt_prm *prm)
2264 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2265 (uint32_t)sizeof(ctio->u.status1.sense_data));
2266 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2267 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
2268 ctio->u.status0.flags |= cpu_to_le16(
2269 CTIO7_FLAGS_EXPLICIT_CONFORM |
2270 CTIO7_FLAGS_CONFORM_REQ);
2272 ctio->u.status0.residual = cpu_to_le32(prm->residual);
2273 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2274 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2277 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
2278 if (prm->cmd->se_cmd.scsi_status != 0) {
2279 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
2280 "Skipping EXPLICIT_CONFORM and "
2281 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2282 "non GOOD status\n");
2283 goto skip_explict_conf;
2285 ctio->u.status1.flags |= cpu_to_le16(
2286 CTIO7_FLAGS_EXPLICIT_CONFORM |
2287 CTIO7_FLAGS_CONFORM_REQ);
2290 ctio->u.status1.flags &=
2291 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2292 ctio->u.status1.flags |=
2293 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2294 ctio->u.status1.scsi_status |=
2295 cpu_to_le16(SS_SENSE_LEN_VALID);
2296 ctio->u.status1.sense_length =
2297 cpu_to_le16(prm->sense_buffer_len);
2298 for (i = 0; i < prm->sense_buffer_len/4; i++)
2299 ((uint32_t *)ctio->u.status1.sense_data)[i] =
2300 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2302 if (unlikely((prm->sense_buffer_len % 4) != 0)) {
2305 ql_dbg(ql_dbg_tgt, vha, 0xe04f,
2306 "qla_target(%d): %d bytes of sense "
2307 "lost", prm->tgt->ha->vp_idx,
2308 prm->sense_buffer_len % 4);
2314 ctio->u.status1.flags &=
2315 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2316 ctio->u.status1.flags |=
2317 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2318 ctio->u.status1.sense_length = 0;
2319 memset(ctio->u.status1.sense_data, 0,
2320 sizeof(ctio->u.status1.sense_data));
2323 /* Sense with len > 24, is it possible ??? */
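/*
 * Decide, from the command's protection operation and the
 * ql2xenablehba_err_chk module parameter, whether the HBA should perform
 * T10-PI error checking for this command.
 */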
2330 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2333 * Uncomment when corresponding SCSI changes are done.
2335 if (!sp->cmd->prot_chk)
2339 switch (se_cmd->prot_op) {
2340 case TARGET_PROT_DOUT_INSERT:
2341 case TARGET_PROT_DIN_STRIP:
2342 if (ql2xenablehba_err_chk >= 1)
2345 case TARGET_PROT_DOUT_PASS:
2346 case TARGET_PROT_DIN_PASS:
2347 if (ql2xenablehba_err_chk >= 2)
2350 case TARGET_PROT_DIN_INSERT:
2351 case TARGET_PROT_DOUT_STRIP:
2360 * qlt_set_t10dif_tags - Extract Ref and App tags from the SCSI command
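 *
 * The reference tag is seeded from the lower 32 bits of the LBA; when HBA
 * error checking is enabled, all four ref-tag mask bytes are set for DIF
 * types 0-2, while type 3 checks the guard tag only and leaves the ref-tag
 * mask cleared. The application tag mask stays clear (see the note below).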
2364 qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
2366 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2368 /* wait until Mode Sense/Select cmd, modepage Ah, subpage 2
2369 * have been implemented by TCM, before AppTag is available.
2370 * Look for modesense_handlers[]
2373 ctx->app_tag_mask[0] = 0x0;
2374 ctx->app_tag_mask[1] = 0x0;
2376 switch (se_cmd->prot_type) {
2377 case TARGET_DIF_TYPE0_PROT:
2379 * No check for ql2xenablehba_err_chk, as it would be an
2380 * I/O error if hba tag generation is not done.
2382 ctx->ref_tag = cpu_to_le32(lba);
2384 if (!qlt_hba_err_chk_enabled(se_cmd))
2387 /* enable ALL bytes of the ref tag */
2388 ctx->ref_tag_mask[0] = 0xff;
2389 ctx->ref_tag_mask[1] = 0xff;
2390 ctx->ref_tag_mask[2] = 0xff;
2391 ctx->ref_tag_mask[3] = 0xff;
2394 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
2397 case TARGET_DIF_TYPE1_PROT:
2398 ctx->ref_tag = cpu_to_le32(lba);
2400 if (!qlt_hba_err_chk_enabled(se_cmd))
2403 /* enable ALL bytes of the ref tag */
2404 ctx->ref_tag_mask[0] = 0xff;
2405 ctx->ref_tag_mask[1] = 0xff;
2406 ctx->ref_tag_mask[2] = 0xff;
2407 ctx->ref_tag_mask[3] = 0xff;
2410 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
2411 * match LBA in CDB + N
2413 case TARGET_DIF_TYPE2_PROT:
2414 ctx->ref_tag = cpu_to_le32(lba);
2416 if (!qlt_hba_err_chk_enabled(se_cmd))
2419 /* enable ALL bytes of the ref tag */
2420 ctx->ref_tag_mask[0] = 0xff;
2421 ctx->ref_tag_mask[1] = 0xff;
2422 ctx->ref_tag_mask[2] = 0xff;
2423 ctx->ref_tag_mask[3] = 0xff;
2426 /* For Type 3 protection: 16 bit GUARD only */
2427 case TARGET_DIF_TYPE3_PROT:
2428 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2429 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
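/*
 * Build a CTIO_CRC2 IOCB for a T10-PI protected transfer: compute the DIF
 * byte count and the firmware protection options (insert/strip/pass),
 * allocate a CRC context from dl_dma_pool, seed the ref/app tags via
 * qlt_set_t10dif_tags(), and walk the data (and, when bundling, the
 * protection) scatterlists into DSD lists.
 */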
2436 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2439 uint32_t transfer_length = 0;
2440 uint32_t data_bytes;
2442 uint8_t bundling = 1;
2444 struct crc_context *crc_ctx_pkt = NULL;
2445 struct qla_hw_data *ha;
2446 struct ctio_crc2_to_fw *pkt;
2447 dma_addr_t crc_ctx_dma;
2448 uint16_t fw_prot_opts = 0;
2449 struct qla_tgt_cmd *cmd = prm->cmd;
2450 struct se_cmd *se_cmd = &cmd->se_cmd;
2452 struct atio_from_isp *atio = &prm->cmd->atio;
2457 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
2459 memset(pkt, 0, sizeof(*pkt));
2461 ql_dbg(ql_dbg_tgt, vha, 0xe071,
2462 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
2463 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
2464 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
2466 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
2467 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
2470 /* Compute dif len and adjust data len to include protection */
2471 data_bytes = cmd->bufflen;
2472 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
2474 switch (se_cmd->prot_op) {
2475 case TARGET_PROT_DIN_INSERT:
2476 case TARGET_PROT_DOUT_STRIP:
2477 transfer_length = data_bytes;
2478 data_bytes += dif_bytes;
2481 case TARGET_PROT_DIN_STRIP:
2482 case TARGET_PROT_DOUT_INSERT:
2483 case TARGET_PROT_DIN_PASS:
2484 case TARGET_PROT_DOUT_PASS:
2485 transfer_length = data_bytes + dif_bytes;
2493 if (!qlt_hba_err_chk_enabled(se_cmd))
2494 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
2495 /* HBA error checking enabled */
2496 else if (IS_PI_UNINIT_CAPABLE(ha)) {
2497 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2498 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2499 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
2500 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2501 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2504 switch (se_cmd->prot_op) {
2505 case TARGET_PROT_DIN_INSERT:
2506 case TARGET_PROT_DOUT_INSERT:
2507 fw_prot_opts |= PO_MODE_DIF_INSERT;
2509 case TARGET_PROT_DIN_STRIP:
2510 case TARGET_PROT_DOUT_STRIP:
2511 fw_prot_opts |= PO_MODE_DIF_REMOVE;
2513 case TARGET_PROT_DIN_PASS:
2514 case TARGET_PROT_DOUT_PASS:
2515 fw_prot_opts |= PO_MODE_DIF_PASS;
2516 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
2518 default:/* Normal Request */
2519 fw_prot_opts |= PO_MODE_DIF_PASS;
2525 /* Update entry type to indicate Command Type CRC_2 IOCB */
2526 pkt->entry_type = CTIO_CRC2;
2527 pkt->entry_count = 1;
2528 pkt->vp_index = vha->vp_idx;
2530 h = qlt_make_handle(vha);
2531 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2533 * CTIO type 7 from the firmware doesn't provide a way to
2534 * know the initiator's LOOP ID, hence we can't find
2535 * the session and, therefore, the command.
2539 ha->tgt.cmds[h-1] = prm->cmd;
2542 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
2543 pkt->nport_handle = prm->cmd->loop_id;
2544 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2545 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2546 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2547 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2548 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2550 /* silence compile warning */
2551 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2552 pkt->ox_id = cpu_to_le16(t16);
2554 t16 = (atio->u.isp24.attr << 9);
2555 pkt->flags |= cpu_to_le16(t16);
2556 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2558 /* Set transfer direction */
2559 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2560 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2561 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2562 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2565 pkt->dseg_count = prm->tot_dsds;
2566 /* Fibre channel byte count */
2567 pkt->transfer_length = cpu_to_le32(transfer_length);
2570 /* ----- CRC context -------- */
2572 /* Allocate CRC context from global pool */
2573 crc_ctx_pkt = cmd->ctx =
2574 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2577 goto crc_queuing_error;
2579 /* Zero out CTX area. */
2580 clr_ptr = (uint8_t *)crc_ctx_pkt;
2581 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
2583 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
2584 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
2587 crc_ctx_pkt->handle = pkt->handle;
2589 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
2591 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2592 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2593 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2597 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2600 * Configure Bundling if we need to fetch interleaving
2601 * protection PCI accesses
2603 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
2604 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
2605 crc_ctx_pkt->u.bundling.dseg_count =
2606 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
2607 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
2610 /* Finish the common fields of CRC pkt */
2611 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
2612 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
2613 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2614 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
2617 /* Walks data segments */
2618 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2620 if (!bundling && prm->prot_seg_cnt) {
2621 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2622 prm->tot_dsds, cmd))
2623 goto crc_queuing_error;
2624 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2625 (prm->tot_dsds - prm->prot_seg_cnt), cmd))
2626 goto crc_queuing_error;
2628 if (bundling && prm->prot_seg_cnt) {
2629 /* Walks dif segments */
2630 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2632 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2633 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2634 prm->prot_seg_cnt, cmd))
2635 goto crc_queuing_error;
2640 /* Cleanup will be performed by the caller */
2642 return QLA_FUNCTION_FAILED;
2647 * Callback to set up the response for xmit_type of QLA_TGT_XMIT_DATA and
2648 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
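 *
 * Builds and fires a CTIO7 (or CTIO_CRC2 for protected transfers) carrying
 * read data and/or SCSI status. When sense data or a non-GOOD status must
 * follow the data, a second status-only CTIO is chained behind the data
 * CTIO and marked as the one that really completes the exchange.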
2650 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2651 uint8_t scsi_status)
2653 struct scsi_qla_host *vha = cmd->vha;
2654 struct qla_hw_data *ha = vha->hw;
2655 struct ctio7_to_24xx *pkt;
2656 struct qla_tgt_prm prm;
2657 uint32_t full_req_cnt = 0;
2658 unsigned long flags = 0;
2661 spin_lock_irqsave(&ha->hardware_lock, flags);
2662 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2663 cmd->state = QLA_TGT_STATE_PROCESSED;
2664 if (cmd->sess->logout_completed)
2665 /* no need to terminate. FW already freed exchange. */
2666 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2668 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2672 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2674 memset(&prm, 0, sizeof(prm));
2675 qlt_check_srr_debug(cmd, &xmit_type);
2677 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2678 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
2679 (xmit_type & QLA_TGT_XMIT_STATUS) ?
2680 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
2683 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2685 if (unlikely(res != 0)) {
2689 spin_lock_irqsave(&ha->hardware_lock, flags);
2691 if (xmit_type == QLA_TGT_XMIT_STATUS)
2692 vha->tgt_counters.core_qla_snd_status++;
2694 vha->tgt_counters.core_qla_que_buf++;
2696 if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
2698 * Either the port is not online or this request was from
2699 * previous life, just abort the processing.
2701 cmd->state = QLA_TGT_STATE_PROCESSED;
2702 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2703 ql_dbg(ql_dbg_async, vha, 0xe101,
2704 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
2705 vha->flags.online, qla2x00_reset_active(vha),
2706 cmd->reset_count, ha->chip_reset);
2707 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2711 /* Does the F/W have IOCBs available for this request? */
2712 res = qlt_check_reserve_free_req(vha, full_req_cnt);
2714 goto out_unmap_unlock;
2716 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
2717 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2719 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2720 if (unlikely(res != 0)) {
2721 vha->req->cnt += full_req_cnt;
2722 goto out_unmap_unlock;
2725 pkt = (struct ctio7_to_24xx *)prm.pkt;
2727 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2728 pkt->u.status0.flags |=
2729 cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2730 CTIO7_FLAGS_STATUS_MODE_0);
2732 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2733 qlt_load_data_segments(&prm, vha);
2735 if (prm.add_status_pkt == 0) {
2736 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2737 pkt->u.status0.scsi_status =
2738 cpu_to_le16(prm.rq_result);
2739 pkt->u.status0.residual =
2740 cpu_to_le32(prm.residual);
2741 pkt->u.status0.flags |= cpu_to_le16(
2742 CTIO7_FLAGS_SEND_STATUS);
2743 if (qlt_need_explicit_conf(ha, cmd, 0)) {
2744 pkt->u.status0.flags |=
2746 CTIO7_FLAGS_EXPLICIT_CONFORM |
2747 CTIO7_FLAGS_CONFORM_REQ);
2753 * We have already made sure that there are enough
2754 * request entries, so the HW lock will not be dropped in
2757 struct ctio7_to_24xx *ctio =
2758 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2760 ql_dbg(ql_dbg_io, vha, 0x305e,
2761 "Building additional status packet 0x%p.\n",
2765 * T10Dif: ctio_crc2_to_fw overlay on top of
2768 memcpy(ctio, pkt, sizeof(*ctio));
2769 /* reset back to CTIO7 */
2770 ctio->entry_count = 1;
2771 ctio->entry_type = CTIO_TYPE7;
2772 ctio->dseg_count = 0;
2773 ctio->u.status1.flags &= ~cpu_to_le16(
2774 CTIO7_FLAGS_DATA_IN);
2776 /* Real finish is ctio_m1's finish */
2777 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2778 pkt->u.status0.flags |= cpu_to_le16(
2779 CTIO7_FLAGS_DONT_RET_CTIO);
2781 /* qlt_24xx_init_ctio_to_isp will correct
2782 * all necessary fields that are part of CTIO7.
2783 * There should be no residual of CTIO-CRC2 data.
2785 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2787 pr_debug("Status CTIO7: %p\n", ctio);
2790 qlt_24xx_init_ctio_to_isp(pkt, &prm);
2793 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2794 cmd->cmd_sent_to_fw = 1;
2796 /* Memory Barrier */
2798 qla2x00_start_iocbs(vha, vha->req);
2799 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2804 qlt_unmap_sg(vha, cmd);
2805 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2809 EXPORT_SYMBOL(qlt_xmit_response);
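/*
 * Ask the initiator for write data: reserve request entries, build a CTIO7
 * (or CTIO_CRC2) with CTIO7_FLAGS_DATA_OUT and load the data segments; the
 * write completion is reported later through tgt_ops->handle_data().
 */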
2811 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2813 struct ctio7_to_24xx *pkt;
2814 struct scsi_qla_host *vha = cmd->vha;
2815 struct qla_hw_data *ha = vha->hw;
2816 struct qla_tgt *tgt = cmd->tgt;
2817 struct qla_tgt_prm prm;
2818 unsigned long flags;
2821 memset(&prm, 0, sizeof(prm));
2827 /* Send marker if required */
2828 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2831 /* Calculate number of entries and segments required */
2832 if (qlt_pci_map_calc_cnt(&prm) != 0)
2835 spin_lock_irqsave(&ha->hardware_lock, flags);
2837 if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
2838 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2840 * Either the port is not online or this request was from
2841 * previous life, just abort the processing.
2843 cmd->state = QLA_TGT_STATE_NEED_DATA;
2844 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2845 ql_dbg(ql_dbg_async, vha, 0xe102,
2846 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2847 vha->flags.online, qla2x00_reset_active(vha),
2848 cmd->reset_count, ha->chip_reset);
2849 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2853 /* Does the F/W have IOCBs available for this request? */
2854 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2856 goto out_unlock_free_unmap;
2857 if (cmd->se_cmd.prot_op)
2858 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2860 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2862 if (unlikely(res != 0)) {
2863 vha->req->cnt += prm.req_cnt;
2864 goto out_unlock_free_unmap;
2867 pkt = (struct ctio7_to_24xx *)prm.pkt;
2868 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2869 CTIO7_FLAGS_STATUS_MODE_0);
2871 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2872 qlt_load_data_segments(&prm, vha);
2874 cmd->state = QLA_TGT_STATE_NEED_DATA;
2875 cmd->cmd_sent_to_fw = 1;
2877 /* Memory Barrier */
2879 qla2x00_start_iocbs(vha, vha->req);
2880 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2884 out_unlock_free_unmap:
2885 qlt_unmap_sg(vha, cmd);
2886 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2890 EXPORT_SYMBOL(qlt_rdy_to_xfer);
2894 * Checks the guard or meta-data for the type of error
2895 * detected by the HBA.
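 *
 * If the returned application tag is all 0xff (and, for type 3, the
 * reference tag as well), the error is ignored: a good completion is
 * generated and the protection tags of the blocks already transferred are
 * patched. Otherwise the guard, reference and application tags are
 * compared and cmd->se_cmd.pi_err / bad_sector are set for the target
 * core to report.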
2898 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2899 struct ctio_crc_from_fw *sts)
2901 uint8_t *ap = &sts->actual_dif[0];
2902 uint8_t *ep = &sts->expected_dif[0];
2903 uint32_t e_ref_tag, a_ref_tag;
2904 uint16_t e_app_tag, a_app_tag;
2905 uint16_t e_guard, a_guard;
2906 uint64_t lba = cmd->se_cmd.t_task_lba;
2908 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
2909 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
2910 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
2912 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
2913 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
2914 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
2916 ql_dbg(ql_dbg_tgt, vha, 0xe075,
2917 "iocb(s) %p Returned STATUS.\n", sts);
2919 ql_dbg(ql_dbg_tgt, vha, 0xf075,
2920 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2921 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2922 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2926 * For type 3: ref & app tag is all 'f's
2927 * For type 0,1,2: app tag is all 'f's
2929 if ((a_app_tag == 0xffff) &&
2930 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
2931 (a_ref_tag == 0xffffffff))) {
2932 uint32_t blocks_done;
2934 /* 2TB boundary case covered automatically with this */
2935 blocks_done = e_ref_tag - (uint32_t)lba + 1;
2936 cmd->se_cmd.bad_sector = e_ref_tag;
2937 cmd->se_cmd.pi_err = 0;
2938 ql_dbg(ql_dbg_tgt, vha, 0xf074,
2939 "need to return scsi good\n");
2941 /* Update protection tag */
2942 if (cmd->prot_sg_cnt) {
2943 uint32_t i, k = 0, num_ent;
2944 struct scatterlist *sg, *sgl;
2949 /* Patch the corresponding protection tags */
2950 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
2951 num_ent = sg_dma_len(sg) / 8;
2952 if (k + num_ent < blocks_done) {
2960 if (k != blocks_done) {
2961 ql_log(ql_log_warn, vha, 0xf076,
2962 "unexpected tag values tag:lba=%u:%llu)\n",
2963 e_ref_tag, (unsigned long long)lba);
2968 struct sd_dif_tuple *spt;
2970 * This section came from initiator. Is it valid here?
2971 * should ulp be overridden with the actual value???
2973 spt = page_address(sg_page(sg)) + sg->offset;
2976 spt->app_tag = 0xffff;
2977 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
2978 spt->ref_tag = 0xffffffff;
2986 if (e_guard != a_guard) {
2987 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
2988 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2990 ql_log(ql_log_warn, vha, 0xe076,
2991 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2992 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2993 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2994 a_guard, e_guard, cmd);
2999 if (e_ref_tag != a_ref_tag) {
3000 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
3001 cmd->se_cmd.bad_sector = e_ref_tag;
3003 ql_log(ql_log_warn, vha, 0xe077,
3004 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
3005 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
3006 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
3007 a_guard, e_guard, cmd);
3011 /* check appl tag */
3012 if (e_app_tag != a_app_tag) {
3013 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
3014 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
3016 ql_log(ql_log_warn, vha, 0xe078,
3017 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
3018 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
3019 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
3020 a_guard, e_guard, cmd);
3028 /* If hardware_lock held on entry, might drop it, then reacquire */
3029 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
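/*
 * Terminate an immediate notify exchange: allocate a request packet and
 * send a Notify Acknowledge with NOTIFY_ACK_FLAGS_TERMINATE, built from
 * the fields of the original notification.
 */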
3030 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3031 struct imm_ntfy_from_isp *ntfy)
3033 struct nack_to_isp *nack;
3034 struct qla_hw_data *ha = vha->hw;
3038 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3039 "Sending TERM ELS CTIO (ha=%p)\n", ha);
3041 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
3043 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3044 "qla_target(%d): %s failed: unable to allocate "
3045 "request packet\n", vha->vp_idx, __func__);
3049 pkt->entry_type = NOTIFY_ACK_TYPE;
3050 pkt->entry_count = 1;
3051 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3053 nack = (struct nack_to_isp *)pkt;
3054 nack->ox_id = ntfy->ox_id;
3056 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3057 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3058 nack->u.isp24.flags = ntfy->u.isp24.flags &
3059 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3063 nack->u.isp24.flags |=
3064 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3066 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3067 nack->u.isp24.status = ntfy->u.isp24.status;
3068 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3069 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3070 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3071 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3072 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3073 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3075 qla2x00_start_iocbs(vha, vha->req);
3079 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3080 struct imm_ntfy_from_isp *imm, int ha_locked)
3082 unsigned long flags = 0;
3085 if (qlt_issue_marker(vha, ha_locked) < 0)
3089 rc = __qlt_send_term_imm_notif(vha, imm);
3093 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3098 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3099 rc = __qlt_send_term_imm_notif(vha, imm);
3103 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3108 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3111 /* If hardware_lock held on entry, might drop it, then reacquire */
3112 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
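/*
 * Terminate the exchange in firmware: build a CTIO7 addressed by the
 * original ATIO's S_ID and exchange address, with CTIO7_FLAGS_TERMINATE
 * set, so the firmware drops the exchange.
 */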
3113 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
3114 struct qla_tgt_cmd *cmd,
3115 struct atio_from_isp *atio)
3117 struct ctio7_to_24xx *ctio24;
3118 struct qla_hw_data *ha = vha->hw;
3123 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3125 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
3127 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3128 "qla_target(%d): %s failed: unable to allocate "
3129 "request packet\n", vha->vp_idx, __func__);
3134 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3135 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3136 "qla_target(%d): Terminating cmd %p with "
3137 "incorrect state %d\n", vha->vp_idx, cmd,
3143 vha->tgt_counters.num_term_xchg_sent++;
3144 pkt->entry_count = 1;
3145 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3147 ctio24 = (struct ctio7_to_24xx *)pkt;
3148 ctio24->entry_type = CTIO_TYPE7;
3149 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3150 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3151 ctio24->vp_index = vha->vp_idx;
3152 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3153 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3154 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3155 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3156 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
3157 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
3158 CTIO7_FLAGS_TERMINATE);
3159 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3160 ctio24->u.status1.ox_id = cpu_to_le16(temp);
3162 /* Most likely, it isn't needed */
3163 ctio24->u.status1.residual = get_unaligned((uint32_t *)
3164 &atio->u.isp24.fcp_cmnd.add_cdb[
3165 atio->u.isp24.fcp_cmnd.add_cdb_len]);
3166 if (ctio24->u.status1.residual != 0)
3167 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3169 /* Memory Barrier */
3171 qla2x00_start_iocbs(vha, vha->req);
3175 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3176 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
3178 unsigned long flags = 0;
3181 if (qlt_issue_marker(vha, ha_locked) < 0)
3185 rc = __qlt_send_term_exchange(vha, cmd, atio);
3187 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3190 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3191 rc = __qlt_send_term_exchange(vha, cmd, atio);
3193 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3196 if (cmd && (!cmd->aborted ||
3197 !cmd->cmd_sent_to_fw)) {
3199 qlt_unmap_sg(vha, cmd);
3200 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3204 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3209 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3211 struct list_head free_list;
3212 struct qla_tgt_cmd *cmd, *tcmd;
3214 vha->hw->tgt.leak_exchg_thresh_hold =
3215 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3218 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3219 INIT_LIST_HEAD(&free_list);
3220 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3222 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3223 list_del(&cmd->cmd_list);
3224 /* This cmd was never sent to TCM. There is no need
3225 * to schedule free or call free_cmd
3228 vha->hw->tgt.num_qfull_cmds_alloc--;
3231 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3234 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3236 uint32_t total_leaked;
3238 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3240 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3241 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3243 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3244 "Chip reset due to exchange starvation: %d/%d.\n",
3245 total_leaked, vha->hw->cur_fw_xcb_count);
3247 if (IS_P3P_TYPE(vha->hw))
3248 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3250 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3251 qla2xxx_wake_dpc(vha);
3256 void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3258 struct qla_tgt *tgt = cmd->tgt;
3259 struct scsi_qla_host *vha = tgt->vha;
3260 struct se_cmd *se_cmd = &cmd->se_cmd;
3262 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3263 "qla_target(%d): terminating exchange for aborted cmd=%p "
3264 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3268 cmd->cmd_flags |= BIT_6;
3270 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
3272 EXPORT_SYMBOL(qlt_abort_cmd);
3274 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3276 struct qla_tgt_sess *sess = cmd->sess;
3278 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3279 "%s: se_cmd[%p] ox_id %04x\n",
3280 __func__, &cmd->se_cmd,
3281 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3283 BUG_ON(cmd->cmd_in_wq);
3286 qlt_decr_num_pend_cmds(cmd->vha);
3288 BUG_ON(cmd->sg_mapped);
3289 cmd->jiffies_at_free = get_jiffies_64();
3290 if (unlikely(cmd->free_sg))
3293 if (!sess || !sess->se_sess) {
3297 cmd->jiffies_at_free = get_jiffies_64();
3298 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3300 EXPORT_SYMBOL(qlt_free_cmd);
3302 /* ha->hardware_lock supposed to be held on entry */
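/*
 * A CTIO came back with SRR (sequence retransmission request) status:
 * queue a qla_tgt_srr_ctio entry on tgt->srr_ctio_list and, if the
 * matching SRR immediate notify has already arrived, schedule
 * tgt->srr_work; otherwise the entry waits for the notify.
 */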
3303 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
3304 struct qla_tgt_cmd *cmd, void *ctio)
3306 struct qla_tgt_srr_ctio *sc;
3307 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3308 struct qla_tgt_srr_imm *imm;
3311 cmd->cmd_flags |= BIT_15;
3313 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
3314 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
3317 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
3318 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
3323 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
3326 /* IRQ is already OFF */
3327 spin_lock(&tgt->srr_lock);
3328 sc->srr_id = tgt->ctio_srr_id;
3329 list_add_tail(&sc->srr_list_entry,
3330 &tgt->srr_ctio_list);
3331 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
3332 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
3333 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
3335 list_for_each_entry(imm, &tgt->srr_imm_list,
3337 if (imm->srr_id == sc->srr_id) {
3343 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
3344 "Scheduling srr work\n");
3345 schedule_work(&tgt->srr_work);
3347 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
3348 "qla_target(%d): imm_srr_id "
3349 "== ctio_srr_id (%d), but there is no "
3350 "corresponding SRR IMM, deleting CTIO "
3351 "SRR %p\n", vha->vp_idx,
3352 tgt->ctio_srr_id, sc);
3353 list_del(&sc->srr_list_entry);
3354 spin_unlock(&tgt->srr_lock);
3360 spin_unlock(&tgt->srr_lock);
3362 struct qla_tgt_srr_imm *ti;
3364 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
3365 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
3367 spin_lock(&tgt->srr_lock);
3368 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
3370 if (imm->srr_id == tgt->ctio_srr_id) {
3371 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
3372 "IMM SRR %p deleted (id %d)\n",
3374 list_del(&imm->srr_list_entry);
3375 qlt_reject_free_srr_imm(vha, imm, 1);
3378 spin_unlock(&tgt->srr_lock);
3387 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3389 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
3390 struct qla_tgt_cmd *cmd, uint32_t status)
3395 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3397 cpu_to_le16(OF_TERM_EXCH));
3402 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
3407 /* ha->hardware_lock supposed to be held on entry */
3408 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
3411 struct qla_hw_data *ha = vha->hw;
3414 if (ha->tgt.cmds[handle] != NULL) {
3415 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
3416 ha->tgt.cmds[handle] = NULL;
3422 /* ha->hardware_lock supposed to be held on entry */
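/* Strip the completion marks from a CTIO handle and map it back to the
 * qla_tgt_cmd that owns it, validating the handle range.
 */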
3423 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3424 uint32_t handle, void *ctio)
3426 struct qla_tgt_cmd *cmd = NULL;
3428 /* Clear out internal marks */
3429 handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
3430 CTIO_INTERMEDIATE_HANDLE_MARK);
3432 if (handle != QLA_TGT_NULL_HANDLE) {
3433 if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
3436 /* handle-1 is actually used */
3437 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
3438 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3439 "qla_target(%d): Wrong handle %x received\n",
3440 vha->vp_idx, handle);
3443 cmd = qlt_get_cmd(vha, handle);
3444 if (unlikely(cmd == NULL)) {
3445 ql_dbg(ql_dbg_tgt, vha, 0xe053,
3446 "qla_target(%d): Suspicious: unable to "
3447 "find the command with handle %x\n", vha->vp_idx,
3451 } else if (ctio != NULL) {
3452 /* We can't get loop ID from CTIO7 */
3453 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3454 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3455 "support NULL handles\n", vha->vp_idx);
3462 /* hardware_lock should be held by caller. */
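/*
 * Abort a command left outstanding in the firmware across a chip reset:
 * unmap its scatterlist; a command waiting for write data is handed back
 * through tgt_ops->handle_data() with write_data_transferred cleared,
 * anything else is freed via tgt_ops->free_cmd().
 */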
3464 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3466 struct qla_hw_data *ha = vha->hw;
3470 qlt_unmap_sg(vha, cmd);
3472 handle = qlt_make_handle(vha);
3474 /* TODO: fix debug message type and ids. */
3475 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3476 ql_dbg(ql_dbg_io, vha, 0xff00,
3477 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
3478 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3479 cmd->write_data_transferred = 0;
3480 cmd->state = QLA_TGT_STATE_DATA_IN;
3482 ql_dbg(ql_dbg_io, vha, 0xff01,
3483 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
3485 ha->tgt.tgt_ops->handle_data(cmd);
3488 ql_dbg(ql_dbg_io, vha, 0xff03,
3489 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
3494 cmd->cmd_flags |= BIT_17;
3495 ha->tgt.tgt_ops->free_cmd(cmd);
3499 qlt_host_reset_handler(struct qla_hw_data *ha)
3501 struct qla_tgt_cmd *cmd;
3502 unsigned long flags;
3503 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3504 scsi_qla_host_t *vha = NULL;
3505 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
3508 if (!base_vha->hw->tgt.tgt_ops)
3511 if (!tgt || qla_ini_mode_enabled(base_vha)) {
3512 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
3513 "Target mode disabled\n");
3517 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
3518 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
3519 base_vha->dpc_flags);
3521 spin_lock_irqsave(&ha->hardware_lock, flags);
3522 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
3523 cmd = qlt_get_cmd(base_vha, i);
3526 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3528 qlt_abort_cmd_on_host_reset(vha, cmd);
3530 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3535 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
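 *
 * Completion path for CTIO IOCBs returned by the firmware: looks up the
 * command by its handle, unmaps its scatterlist, handles error statuses
 * (aborts, port logout, SRR, DIF errors) and then either reports arrived
 * write data via tgt_ops->handle_data() or frees the command via
 * tgt_ops->free_cmd().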
3537 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3538 uint32_t status, void *ctio)
3540 struct qla_hw_data *ha = vha->hw;
3541 struct se_cmd *se_cmd;
3542 struct qla_tgt_cmd *cmd;
3544 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3545 /* That could happen only in case of an error/reset/abort */
3546 if (status != CTIO_SUCCESS) {
3547 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3548 "Intermediate CTIO received"
3549 " (status %x)\n", status);
3554 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
3558 se_cmd = &cmd->se_cmd;
3559 cmd->cmd_sent_to_fw = 0;
3561 qlt_unmap_sg(vha, cmd);
3563 if (unlikely(status != CTIO_SUCCESS)) {
3564 switch (status & 0xFFFF) {
3565 case CTIO_LIP_RESET:
3566 case CTIO_TARGET_RESET:
3568 /* driver request abort via Terminate exchange */
3570 case CTIO_INVALID_RX_ID:
3572 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3573 "qla_target(%d): CTIO with "
3574 "status %#x received, state %x, se_cmd %p, "
3575 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3576 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3577 status, cmd->state, se_cmd);
3580 case CTIO_PORT_LOGGED_OUT:
3581 case CTIO_PORT_UNAVAILABLE:
3583 int logged_out = (status & 0xFFFF);
3584 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3585 "qla_target(%d): CTIO with %s status %x "
3586 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3587 (logged_out == CTIO_PORT_LOGGED_OUT) ?
3588 "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3589 status, cmd->state, se_cmd);
3591 if (logged_out && cmd->sess) {
3593 * Session is already logged out, but we need
3594 * to notify initiator, who's not aware of this
3596 cmd->sess->logout_on_delete = 0;
3597 cmd->sess->send_els_logo = 1;
3598 qlt_schedule_sess_for_deletion(cmd->sess, true);
3602 case CTIO_SRR_RECEIVED:
3603 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
3604 "qla_target(%d): CTIO with SRR_RECEIVED"
3605 " status %x received (state %x, se_cmd %p)\n",
3606 vha->vp_idx, status, cmd->state, se_cmd);
3607 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
3612 case CTIO_DIF_ERROR: {
3613 struct ctio_crc_from_fw *crc =
3614 (struct ctio_crc_from_fw *)ctio;
3615 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3616 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
3617 vha->vp_idx, status, cmd->state, se_cmd,
3618 *((u64 *)&crc->actual_dif[0]),
3619 *((u64 *)&crc->expected_dif[0]));
3621 if (qlt_handle_dif_error(vha, cmd, ctio)) {
3622 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3623 /* scsi Write/xfer rdy complete */
3626 /* scsi read/xmit response complete:
3627 * call the DIF error handler to send the scsi status
3628 * rather than terminate the exchange.
3630 cmd->state = QLA_TGT_STATE_PROCESSED;
3631 ha->tgt.tgt_ops->handle_dif_err(cmd);
3635 /* Need to generate a SCSI good completion,
3636 * because FW did not send scsi status.
3644 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3645 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3646 vha->vp_idx, status, cmd->state, se_cmd);
3651 /* "cmd->aborted" means
3652 * cmd is already aborted/terminated, we don't
3653 * need to terminate again. The exchange is already
3654 * cleaned up/freed at FW level. Just cleanup at driver
3657 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3659 cmd->cmd_flags |= BIT_13;
3660 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3666 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3667 cmd->cmd_flags |= BIT_12;
3668 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3669 cmd->state = QLA_TGT_STATE_DATA_IN;
3671 if (status == CTIO_SUCCESS)
3672 cmd->write_data_transferred = 1;
3674 ha->tgt.tgt_ops->handle_data(cmd);
3676 } else if (cmd->aborted) {
3677 cmd->cmd_flags |= BIT_18;
3678 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3679 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3681 cmd->cmd_flags |= BIT_19;
3682 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3683 "qla_target(%d): A command in state (%d) should "
3684 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
3687 if (unlikely(status != CTIO_SUCCESS) &&
3689 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
3693 ha->tgt.tgt_ops->free_cmd(cmd);
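/* Map the FCP task attribute from the ATIO to the corresponding TCM tag
 * type, defaulting to ORDERED for unknown task codes.
 */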
3696 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3701 switch (task_codes) {
3702 case ATIO_SIMPLE_QUEUE:
3703 fcp_task_attr = TCM_SIMPLE_TAG;
3705 case ATIO_HEAD_OF_QUEUE:
3706 fcp_task_attr = TCM_HEAD_TAG;
3708 case ATIO_ORDERED_QUEUE:
3709 fcp_task_attr = TCM_ORDERED_TAG;
3711 case ATIO_ACA_QUEUE:
3712 fcp_task_attr = TCM_ACA_TAG;
3715 fcp_task_attr = TCM_SIMPLE_TAG;
3718 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3719 "qla_target: unknown task code %x, use ORDERED instead\n",
3721 fcp_task_attr = TCM_ORDERED_TAG;
3725 return fcp_task_attr;
3728 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
3731 * Process context for I/O path into tcm_qla2xxx code
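 *
 * Decodes the ATIO (CDB, LUN, data direction, task attribute and data
 * length) and submits the command to the target core via
 * tgt_ops->handle_cmd(); on failure the exchange is terminated and the
 * command's tag and session reference are released.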
3733 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3735 scsi_qla_host_t *vha = cmd->vha;
3736 struct qla_hw_data *ha = vha->hw;
3737 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3738 struct qla_tgt_sess *sess = cmd->sess;
3739 struct atio_from_isp *atio = &cmd->atio;
3741 unsigned long flags;
3742 uint32_t data_length;
3743 int ret, fcp_task_attr, data_dir, bidi = 0;
3746 cmd->cmd_flags |= BIT_1;
3751 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3752 "cmd with tag %u is aborted\n",
3753 cmd->atio.u.isp24.exchange_addr);
3757 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3758 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3759 cmd->unpacked_lun = scsilun_to_int(
3760 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
3762 if (atio->u.isp24.fcp_cmnd.rddata &&
3763 atio->u.isp24.fcp_cmnd.wrdata) {
3765 data_dir = DMA_TO_DEVICE;
3766 } else if (atio->u.isp24.fcp_cmnd.rddata)
3767 data_dir = DMA_FROM_DEVICE;
3768 else if (atio->u.isp24.fcp_cmnd.wrdata)
3769 data_dir = DMA_TO_DEVICE;
3771 data_dir = DMA_NONE;
3773 fcp_task_attr = qlt_get_fcp_task_attr(vha,
3774 atio->u.isp24.fcp_cmnd.task_attr);
3775 data_length = be32_to_cpu(get_unaligned((uint32_t *)
3776 &atio->u.isp24.fcp_cmnd.add_cdb[
3777 atio->u.isp24.fcp_cmnd.add_cdb_len]));
3779 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3780 fcp_task_attr, data_dir, bidi);
3784 * Drop the extra session reference taken in qlt_handle_cmd_for_atio().
3786 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3787 ha->tgt.tgt_ops->put_sess(sess);
3788 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3792 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
3794 * cmd has not been sent to the target yet, so pass NULL as the second
3795 * argument to qlt_send_term_exchange() and free the memory here.
3797 cmd->cmd_flags |= BIT_2;
3798 spin_lock_irqsave(&ha->hardware_lock, flags);
3799 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
3801 qlt_decr_num_pend_cmds(vha);
3802 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3803 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3805 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3806 ha->tgt.tgt_ops->put_sess(sess);
3807 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3810 static void qlt_do_work(struct work_struct *work)
3812 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3813 scsi_qla_host_t *vha = cmd->vha;
3814 unsigned long flags;
3816 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3817 list_del(&cmd->cmd_list);
3818 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
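/* Allocate a qla_tgt_cmd from the session's percpu_ida tag pool and
 * initialize it from the incoming ATIO (state, loop_id, confirmed
 * completion support, reset count).
 */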
3823 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3824 struct qla_tgt_sess *sess,
3825 struct atio_from_isp *atio)
3827 struct se_session *se_sess = sess->se_sess;
3828 struct qla_tgt_cmd *cmd;
3831 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
3835 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
3836 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
3838 memcpy(&cmd->atio, atio, sizeof(*atio));
3839 cmd->state = QLA_TGT_STATE_NEW;
3840 cmd->tgt = vha->vha_tgt.qla_tgt;
3841 qlt_incr_num_pend_cmds(vha);
3843 cmd->se_cmd.map_tag = tag;
3845 cmd->loop_id = sess->loop_id;
3846 cmd->conf_compl_supported = sess->conf_compl_supported;
3849 cmd->jiffies_at_alloc = get_jiffies_64();
3851 cmd->reset_count = vha->hw->chip_reset;
3856 static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
3859 static void qlt_create_sess_from_atio(struct work_struct *work)
3861 struct qla_tgt_sess_op *op = container_of(work,
3862 struct qla_tgt_sess_op, work);
3863 scsi_qla_host_t *vha = op->vha;
3864 struct qla_hw_data *ha = vha->hw;
3865 struct qla_tgt_sess *sess;
3866 struct qla_tgt_cmd *cmd;
3867 unsigned long flags;
3868 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3870 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3871 list_del(&op->cmd_list);
3872 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3875 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3876 "sess_op with tag %u is aborted\n",
3877 op->atio.u.isp24.exchange_addr);
3881 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3882 "qla_target(%d): Unable to find wwn login"
3883 " (s_id %x:%x:%x), trying to create it manually\n",
3884 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3886 if (op->atio.u.raw.entry_count > 1) {
3887 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3888 "Dropping multy entry atio %p\n", &op->atio);
3892 sess = qlt_make_local_sess(vha, s_id);
3893 /* sess has an extra creation ref. */
3898 * Now obtain a pre-allocated session tag using the original op->atio
3899 * packet header, and dispatch into __qlt_do_work() using the existing
3902 cmd = qlt_get_tag(vha, sess, &op->atio);
3904 spin_lock_irqsave(&ha->hardware_lock, flags);
3905 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
3906 ha->tgt.tgt_ops->put_sess(sess);
3907 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3912 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
3913 * the extra reference taken above by qlt_make_local_sess()
3920 spin_lock_irqsave(&ha->hardware_lock, flags);
3921 qlt_send_term_exchange(vha, NULL, &op->atio, 1);
3922 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3927 /* ha->hardware_lock supposed to be held on entry */
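/*
 * Entry point for a new FCP command ATIO: look up the session by S_ID
 * (deferring to a workqueue if it does not exist yet), allocate a
 * qla_tgt_cmd from the session's tag pool and queue qlt_do_work() on
 * qla_tgt_wq.
 */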
3928 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3929 struct atio_from_isp *atio)
3931 struct qla_hw_data *ha = vha->hw;
3932 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3933 struct qla_tgt_sess *sess;
3934 struct qla_tgt_cmd *cmd;
3936 if (unlikely(tgt->tgt_stop)) {
3937 ql_dbg(ql_dbg_io, vha, 0x3061,
3938 "New command while device %p is shutting down\n", tgt);
3942 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
3943 if (unlikely(!sess)) {
3944 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
3949 memcpy(&op->atio, atio, sizeof(*atio));
3952 spin_lock(&vha->cmd_list_lock);
3953 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3954 spin_unlock(&vha->cmd_list_lock);
3956 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3957 queue_work(qla_tgt_wq, &op->work);
3961 /* Another WWN used to have our s_id. Our PLOGI scheduled its
3962 * session deletion, but it's still in sess_del_work wq */
3963 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3964 ql_dbg(ql_dbg_io, vha, 0x3061,
3965 "New command while old session %p is being deleted\n",
3971 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3973 kref_get(&sess->se_sess->sess_kref);
3975 cmd = qlt_get_tag(vha, sess, atio);
3977 ql_dbg(ql_dbg_io, vha, 0x3062,
3978 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
3979 ha->tgt.tgt_ops->put_sess(sess);
3984 cmd->cmd_flags |= BIT_0;
3985 cmd->se_cmd.cpuid = -1;
3987 spin_lock(&vha->cmd_list_lock);
3988 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
3989 spin_unlock(&vha->cmd_list_lock);
3991 INIT_WORK(&cmd->work, qlt_do_work);
3992 if (ha->msix_count) {
3993 cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
3994 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
3995 queue_work_on(smp_processor_id(), qla_tgt_wq,
3998 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4001 queue_work(qla_tgt_wq, &cmd->work);
4007 /* ha->hardware_lock supposed to be held on entry */
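/*
 * Translate an FCP task management function into the corresponding TCM
 * TMR and dispatch it through tgt_ops->handle_tmr(); a LUN reset also
 * aborts any commands already queued for that LUN first.
 */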
4008 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
4009 int fn, void *iocb, int flags)
4011 struct scsi_qla_host *vha = sess->vha;
4012 struct qla_hw_data *ha = vha->hw;
4013 struct qla_tgt_mgmt_cmd *mcmd;
4014 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4018 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4020 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4021 "qla_target(%d): Allocation of management "
4022 "command failed, some commands and their data could "
4023 "leak\n", vha->vp_idx);
4026 memset(mcmd, 0, sizeof(*mcmd));
4030 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4031 sizeof(mcmd->orig_iocb.imm_ntfy));
4033 mcmd->tmr_func = fn;
4034 mcmd->flags = flags;
4035 mcmd->reset_count = vha->hw->chip_reset;
4038 case QLA_TGT_CLEAR_ACA:
4039 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
4040 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
4041 tmr_func = TMR_CLEAR_ACA;
4044 case QLA_TGT_TARGET_RESET:
4045 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
4046 "qla_target(%d): TARGET_RESET received\n",
4048 tmr_func = TMR_TARGET_WARM_RESET;
4051 case QLA_TGT_LUN_RESET:
4052 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
4053 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
4054 tmr_func = TMR_LUN_RESET;
4055 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4058 case QLA_TGT_CLEAR_TS:
4059 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
4060 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
4061 tmr_func = TMR_CLEAR_TASK_SET;
4064 case QLA_TGT_ABORT_TS:
4065 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
4066 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
4067 tmr_func = TMR_ABORT_TASK_SET;
4070 case QLA_TGT_ABORT_ALL:
4071 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
4072 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
4077 case QLA_TGT_ABORT_ALL_SESS:
4078 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
4079 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
4084 case QLA_TGT_NEXUS_LOSS_SESS:
4085 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
4086 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
4091 case QLA_TGT_NEXUS_LOSS:
4092 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
4093 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
4098 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
4099 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
4100 sess->vha->vp_idx, fn);
4101 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4105 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
4107 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
4108 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
4109 sess->vha->vp_idx, res);
4110 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4117 /* ha->hardware_lock supposed to be held on entry */
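/*
 * Handle a task management request ATIO: look up the session by S_ID and
 * issue the TMR, deferring to the sess_work mechanism if the session does
 * not exist yet.
 */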
4118 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4120 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4121 struct qla_hw_data *ha = vha->hw;
4122 struct qla_tgt *tgt;
4123 struct qla_tgt_sess *sess;
4124 uint32_t lun, unpacked_lun;
4126 unsigned long flags;
4128 tgt = vha->vha_tgt.qla_tgt;
4130 lun = a->u.isp24.fcp_cmnd.lun;
4131 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4133 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4134 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4135 a->u.isp24.fcp_hdr.s_id);
4136 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4138 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4141 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
4142 "qla_target(%d): task mgmt fn 0x%x for "
4143 "non-existant session\n", vha->vp_idx, fn);
4144 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
4145 sizeof(struct atio_from_isp));
4148 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
4151 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4154 /* ha->hardware_lock supposed to be held on entry */
4155 static int __qlt_abort_task(struct scsi_qla_host *vha,
4156 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
4158 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4159 struct qla_hw_data *ha = vha->hw;
4160 struct qla_tgt_mgmt_cmd *mcmd;
4161 uint32_t lun, unpacked_lun;
4164 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4166 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4167 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4168 vha->vp_idx, __func__);
4171 memset(mcmd, 0, sizeof(*mcmd));
4174 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4175 sizeof(mcmd->orig_iocb.imm_ntfy));
4177 lun = a->u.isp24.fcp_cmnd.lun;
4178 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4179 mcmd->reset_count = vha->hw->chip_reset;
4181 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
4182 le16_to_cpu(iocb->u.isp2x.seq_id));
4184 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4185 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4187 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4194 /* ha->hardware_lock supposed to be held on entry */
4195 static int qlt_abort_task(struct scsi_qla_host *vha,
4196 struct imm_ntfy_from_isp *iocb)
4198 struct qla_hw_data *ha = vha->hw;
4199 struct qla_tgt_sess *sess;
4201 unsigned long flags;
4203 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4205 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4206 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4207 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4210 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4211 "qla_target(%d): task abort for unexisting "
4212 "session\n", vha->vp_idx);
4213 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4214 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4217 return __qlt_abort_task(vha, iocb, sess);
4220 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4222 if (fcport->tgt_session) {
4223 if (rc != MBS_COMMAND_COMPLETE) {
4224 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4225 "%s: se_sess %p / sess %p from"
4226 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4227 " LOGO failed: %#x\n",
4229 fcport->tgt_session->se_sess,
4230 fcport->tgt_session,
4231 fcport->port_name, fcport->loop_id,
4232 fcport->d_id.b.domain, fcport->d_id.b.area,
4233 fcport->d_id.b.al_pa, rc);
4236 fcport->tgt_session->logout_completed = 1;
4241 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4243 * Schedules sessions with matching port_id/loop_id but different wwn for
4244 * deletion. Returns existing session with matching wwn if present.
4247 static struct qla_tgt_sess *
4248 qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4249 port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess)
4251 struct qla_tgt_sess *sess = NULL, *other_sess;
4254 *conflict_sess = NULL;
4256 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4258 other_wwn = wwn_to_u64(other_sess->port_name);
4260 if (wwn == other_wwn) {
4266 /* find other sess with nport_id collision */
4267 if (port_id.b24 == other_sess->s_id.b24) {
4268 if (loop_id != other_sess->loop_id) {
4269 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4270 "Invalidating sess %p loop_id %d wwn %llx.\n",
4271 other_sess, other_sess->loop_id, other_wwn);
4274 * logout_on_delete is set by default, but another
4275 * session that has the same s_id/loop_id combo
4276 * might have cleared it when it requested this session's
4277 * deletion, so don't touch it
4279 qlt_schedule_sess_for_deletion(other_sess, true);
4282 * Another wwn used to have our s_id/loop_id
4283 * kill the session, but don't free the loop_id
4285 other_sess->keep_nport_handle = 1;
4286 *conflict_sess = other_sess;
4287 qlt_schedule_sess_for_deletion(other_sess,
4293 /* find other sess with nport handle collision */
4294 if (loop_id == other_sess->loop_id) {
4295 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4296 "Invalidating sess %p loop_id %d wwn %llx.\n",
4297 other_sess, other_sess->loop_id, other_wwn);
4299 /* Same loop_id but different s_id
4300 * Ok to kill and logout */
4301 qlt_schedule_sess_for_deletion(other_sess, true);
4308 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4309 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4311 struct qla_tgt_sess_op *op;
4312 struct qla_tgt_cmd *cmd;
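/*
 * Pack the 24-bit FC S_ID (domain:area:al_pa) into a single key so it
 * can be compared against the S_ID carried by each queued ATIO below.
 */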
4316 key = (((u32)s_id->b.domain << 16) |
4317 ((u32)s_id->b.area << 8) |
4318 ((u32)s_id->b.al_pa));
4320 spin_lock(&vha->cmd_list_lock);
4321 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4322 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4323 if (op_key == key) {
4328 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4329 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4330 if (cmd_key == key) {
4335 spin_unlock(&vha->cmd_list_lock);
4341 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4343 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4344 struct imm_ntfy_from_isp *iocb)
4346 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4347 struct qla_hw_data *ha = vha->hw;
4348 struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL;
4354 qlt_plogi_ack_t *pla;
4355 unsigned long flags;
4357 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4359 port_id.b.domain = iocb->u.isp24.port_id[2];
4360 port_id.b.area = iocb->u.isp24.port_id[1];
4361 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4362 port_id.b.rsvd_1 = 0;
4364 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4366 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
4367 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
4368 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
4370 /* res = 1 means ack at the end of thread
4371 * res = 0 means ack async/later.
4373 switch (iocb->u.isp24.status_subcode) {
4376 /* Mark all stale commands in qla_tgt_wq for deletion */
4377 abort_cmds_for_s_id(vha, &port_id);
4380 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4381 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4382 port_id, loop_id, &conflict_sess);
4383 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4386 if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) {
4391 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4393 qlt_send_term_imm_notif(vha, iocb, 1);
4402 qlt_plogi_ack_link(vha, pla, conflict_sess,
4403 QLT_PLOGI_LINK_CONFLICT);
4408 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4410 * Under normal circumstances we want to release nport handle
4411 * during LOGO process to avoid nport handle leaks inside FW.
4412 * The exception is when LOGO is done while another PLOGI with
4413 * the same nport handle is waiting as might be the case here.
4414 * Note: there is always a possibility of a race where session
4415 * deletion has already started for other reasons (e.g. ACL
4416 * removal) and now PLOGI arrives:
4417 * 1. if PLOGI arrived in FW after nport handle has been freed,
4418 * FW must have assigned this PLOGI a new/same handle and we
4419 * can proceed ACK'ing it as usual when session deletion
4420 * completes.
4421 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4422 * bit reached it, the handle has now been released. We'll
4423 * get an error when we ACK this PLOGI. Nothing will be sent
4424 * back to initiator. Initiator should eventually retry
4425 * PLOGI and situation will correct itself.
4427 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4428 (sess->s_id.b24 == port_id.b24));
4429 qlt_schedule_sess_for_deletion(sess, true);
4433 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4436 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4437 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4438 loop_id, &conflict_sess);
4439 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4442 if (conflict_sess) {
4443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4444 "PRLI with conflicting sess %p port %8phC\n",
4445 conflict_sess, conflict_sess->port_name);
4446 qlt_send_term_imm_notif(vha, iocb, 1);
4452 if (sess->deleted) {
4454 * Impatient initiator sent PRLI before the last
4455 * PLOGI could finish. Force it to retry
4456 * while the last one finishes.
4458 ql_log(ql_log_warn, sess->vha, 0xf095,
4459 "sess %p PRLI received, before plogi ack.\n",
4461 qlt_send_term_imm_notif(vha, iocb, 1);
4467 * This shouldn't happen under normal circumstances,
4468 * since we have deleted the old session during PLOGI
4470 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4471 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4472 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4475 sess->loop_id = loop_id;
4476 sess->s_id = port_id;
4479 sess->conf_compl_supported = 1;
4482 res = 1; /* send notify ack */
4484 /* Make session global (not used in fabric mode) */
4485 if (ha->current_topology != ISP_CFG_F) {
4486 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4487 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4488 qla2xxx_wake_dpc(vha);
4490 /* todo: else - create sess here. */
4491 res = 1; /* send notify ack */
4498 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4503 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4504 if (tgt->link_reinit_iocb_pending) {
4505 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
4507 tgt->link_reinit_iocb_pending = 0;
4509 res = 1; /* send notify ack */
4513 case ELS_FLOGI: /* should never happen */
4515 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
4516 "qla_target(%d): Unsupported ELS command %x "
4517 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
4518 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4525 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
4529 * FIXME: Reject non-zero SRR relative offset until we can test
4530 * this code properly.
4532 pr_debug("Rejecting non-zero SRR rel_offs: %u\n", offset);
4535 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
4536 size_t first_offset = 0, rem_offset = offset, tmp = 0;
4537 int i, sg_srr_cnt, bufflen = 0;
4539 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
4540 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
4541 "cmd->sg_cnt: %u, direction: %d\n",
4542 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
4544 if (!cmd->sg || !cmd->sg_cnt) {
4545 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
4546 "Missing cmd->sg or zero cmd->sg_cnt in"
4547 " qla_tgt_set_data_offset\n");
4551 * Walk the current cmd->sg list until we locate the new sg_srr_start
4553 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
4554 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
4555 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
4556 i, sg, sg_page(sg), sg->length, sg->offset);
4558 if ((sg->length + tmp) > offset) {
4559 first_offset = rem_offset;
4561 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
4562 "Found matching sg[%d], using %p as sg_srr_start, "
4563 "and using first_offset: %zu\n", i, sg,
4568 rem_offset -= sg->length;
4571 if (!sg_srr_start) {
4572 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
4573 "Unable to locate sg_srr_start for offset: %u\n", offset);
4576 sg_srr_cnt = (cmd->sg_cnt - i);
4578 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
4580 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
4581 "Unable to allocate sgp\n");
4584 sg_init_table(sg_srr, sg_srr_cnt);
4587 * Walk the remaining list from sg_srr_start, mapping it to the newly
4588 * allocated sg_srr, taking first_offset into account.
4590 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
4592 sg_set_page(sgp, sg_page(sg),
4593 (sg->length - first_offset), first_offset);
4596 sg_set_page(sgp, sg_page(sg), sg->length, 0);
4598 bufflen += sgp->length;
4606 cmd->sg_cnt = sg_srr_cnt;
4607 cmd->bufflen = bufflen;
4608 cmd->offset += offset;
4611 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
4612 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
4614 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
4616 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
4619 if (cmd->sg_cnt < 0)
4622 if (cmd->bufflen < 0)
4629 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
4630 uint32_t srr_rel_offs, int *xmit_type)
4632 int res = 0, rel_offs;
4634 rel_offs = srr_rel_offs - cmd->offset;
4635 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
4636 srr_rel_offs, rel_offs);
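/*
 * rel_offs is the SRR offset relative to what has already been sent:
 * a negative value is invalid, a value equal to cmd->bufflen means only
 * the status IU needs resending, a positive value trims the SGL so the
 * transfer restarts at that offset, and zero leaves xmit_type as
 * QLA_TGT_XMIT_ALL (resend everything).
 */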
4638 *xmit_type = QLA_TGT_XMIT_ALL;
4641 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
4642 "qla_target(%d): SRR rel_offs (%d) < 0",
4643 cmd->vha->vp_idx, rel_offs);
4645 } else if (rel_offs == cmd->bufflen)
4646 *xmit_type = QLA_TGT_XMIT_STATUS;
4647 else if (rel_offs > 0)
4648 res = qlt_set_data_offset(cmd, rel_offs);
4653 /* No locks, thread context */
4654 static void qlt_handle_srr(struct scsi_qla_host *vha,
4655 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
4657 struct imm_ntfy_from_isp *ntfy =
4658 (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
4659 struct qla_hw_data *ha = vha->hw;
4660 struct qla_tgt_cmd *cmd = sctio->cmd;
4661 struct se_cmd *se_cmd = &cmd->se_cmd;
4662 unsigned long flags;
4663 int xmit_type = 0, resp = 0;
4667 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
4668 srr_ui = ntfy->u.isp24.srr_ui;
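/*
 * srr_ui identifies what the initiator wants retransmitted: the status
 * IU, previously sent data-in, or a fresh data-out transfer starting at
 * the relative offset carried in the SRR.
 */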
4670 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
4675 spin_lock_irqsave(&ha->hardware_lock, flags);
4676 qlt_send_notify_ack(vha, ntfy,
4677 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4678 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4679 xmit_type = QLA_TGT_XMIT_STATUS;
4682 case SRR_IU_DATA_IN:
4683 if (!cmd->sg || !cmd->sg_cnt) {
4684 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
4685 "Unable to process SRR_IU_DATA_IN due to"
4686 " missing cmd->sg, state: %d\n", cmd->state);
4690 if (se_cmd->scsi_status != 0) {
4691 ql_dbg(ql_dbg_tgt, vha, 0xe02a,
4692 "Rejecting SRR_IU_DATA_IN with non GOOD "
4696 cmd->bufflen = se_cmd->data_length;
4698 if (qlt_has_data(cmd)) {
4699 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
4701 spin_lock_irqsave(&ha->hardware_lock, flags);
4702 qlt_send_notify_ack(vha, ntfy,
4703 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4704 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4707 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
4708 "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
4709 vha->vp_idx, se_cmd->tag,
4710 cmd->se_cmd.scsi_status);
4714 case SRR_IU_DATA_OUT:
4715 if (!cmd->sg || !cmd->sg_cnt) {
4716 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
4717 "Unable to process SRR_IU_DATA_OUT due to"
4718 " missing cmd->sg\n");
4722 if (se_cmd->scsi_status != 0) {
4723 ql_dbg(ql_dbg_tgt, vha, 0xe02b,
4724 "Rejecting SRR_IU_DATA_OUT"
4725 " with non GOOD scsi_status\n");
4728 cmd->bufflen = se_cmd->data_length;
4730 if (qlt_has_data(cmd)) {
4731 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
4733 spin_lock_irqsave(&ha->hardware_lock, flags);
4734 qlt_send_notify_ack(vha, ntfy,
4735 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4736 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4737 if (xmit_type & QLA_TGT_XMIT_DATA) {
4738 cmd->cmd_flags |= BIT_8;
4739 qlt_rdy_to_xfer(cmd);
4742 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
4743 "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
4744 vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
4749 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
4750 "qla_target(%d): Unknown srr_ui value %x",
4751 vha->vp_idx, srr_ui);
4755 /* Transmit response in case of status and data-in cases */
4757 cmd->cmd_flags |= BIT_7;
4758 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
4764 spin_lock_irqsave(&ha->hardware_lock, flags);
4765 qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
4766 NOTIFY_ACK_SRR_FLAGS_REJECT,
4767 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4768 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4769 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4770 cmd->state = QLA_TGT_STATE_DATA_IN;
4773 cmd->cmd_flags |= BIT_9;
4774 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
4776 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4779 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
4780 struct qla_tgt_srr_imm *imm, int ha_locked)
4782 struct qla_hw_data *ha = vha->hw;
4783 unsigned long flags = 0;
4787 spin_lock_irqsave(&ha->hardware_lock, flags);
4790 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
4791 NOTIFY_ACK_SRR_FLAGS_REJECT,
4792 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4793 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4797 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4803 static void qlt_handle_srr_work(struct work_struct *work)
4805 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
4806 struct scsi_qla_host *vha = tgt->vha;
4807 struct qla_tgt_srr_ctio *sctio;
4808 unsigned long flags;
4810 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
4814 spin_lock_irqsave(&tgt->srr_lock, flags);
4815 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
4816 struct qla_tgt_srr_imm *imm, *i, *ti;
4817 struct qla_tgt_cmd *cmd;
4818 struct se_cmd *se_cmd;
4821 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
4823 if (i->srr_id == sctio->srr_id) {
4824 list_del(&i->srr_list_entry);
4826 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
4827 "qla_target(%d): There must be "
4828 "only one IMM SRR per CTIO SRR "
4829 "(IMM SRR %p, id %d, CTIO %p\n",
4830 vha->vp_idx, i, i->srr_id, sctio);
4831 qlt_reject_free_srr_imm(tgt->vha, i, 0);
4837 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
4838 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
4842 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
4843 "Not found matching IMM for SRR CTIO (id %d)\n",
4847 list_del(&sctio->srr_list_entry);
4849 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4853 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
4854 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
4863 se_cmd = &cmd->se_cmd;
4865 cmd->sg_cnt = se_cmd->t_data_nents;
4866 cmd->sg = se_cmd->t_data_sg;
4868 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
4869 "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
4870 cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
4871 se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
4873 qlt_handle_srr(vha, sctio, imm);
4879 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4882 /* ha->hardware_lock supposed to be held on entry */
4883 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4884 struct imm_ntfy_from_isp *iocb)
4886 struct qla_tgt_srr_imm *imm;
4887 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4888 struct qla_tgt_srr_ctio *sctio;
4892 ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
4895 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
4897 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
4899 /* IRQ is already OFF */
4900 spin_lock(&tgt->srr_lock);
4901 imm->srr_id = tgt->imm_srr_id;
4902 list_add_tail(&imm->srr_list_entry,
4903 &tgt->srr_imm_list);
4904 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
4905 "IMM NTFY SRR %p added (id %d, ui %x)\n",
4906 imm, imm->srr_id, iocb->u.isp24.srr_ui);
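/*
 * Immediate-notify and CTIO SRR entries are paired by srr_id.  If the
 * matching CTIO SRR is already queued (the counters are in sync), hand
 * both off to srr_work; otherwise the CTIO SRR side is expected to
 * schedule the work once its half arrives.
 */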
4907 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
4909 list_for_each_entry(sctio, &tgt->srr_ctio_list,
4911 if (sctio->srr_id == imm->srr_id) {
4917 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
4918 "Scheduling srr work\n");
4919 schedule_work(&tgt->srr_work);
4921 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
4922 "qla_target(%d): imm_srr_id "
4923 "== ctio_srr_id (%d), but there is no "
4924 "corresponding SRR CTIO, deleting IMM "
4925 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
4927 list_del(&imm->srr_list_entry);
4931 spin_unlock(&tgt->srr_lock);
4935 spin_unlock(&tgt->srr_lock);
4937 struct qla_tgt_srr_ctio *ts;
4939 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
4940 "qla_target(%d): Unable to allocate SRR IMM "
4941 "entry, SRR request will be rejected\n", vha->vp_idx);
4943 /* IRQ is already OFF */
4944 spin_lock(&tgt->srr_lock);
4945 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
4947 if (sctio->srr_id == tgt->imm_srr_id) {
4948 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
4949 "CTIO SRR %p deleted (id %d)\n",
4950 sctio, sctio->srr_id);
4951 list_del(&sctio->srr_list_entry);
4952 qlt_send_term_exchange(vha, sctio->cmd,
4953 &sctio->cmd->atio, 1);
4957 spin_unlock(&tgt->srr_lock);
4964 qlt_send_notify_ack(vha, iocb, 0, 0, 0,
4965 NOTIFY_ACK_SRR_FLAGS_REJECT,
4966 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4967 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4971 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4973 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
4974 struct imm_ntfy_from_isp *iocb)
4976 struct qla_hw_data *ha = vha->hw;
4977 uint32_t add_flags = 0;
4978 int send_notify_ack = 1;
4981 status = le16_to_cpu(iocb->u.isp2x.status);
4983 case IMM_NTFY_LIP_RESET:
4985 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
4986 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
4987 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
4988 iocb->u.isp24.status_subcode);
4990 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4991 send_notify_ack = 0;
4995 case IMM_NTFY_LIP_LINK_REINIT:
4997 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4998 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
4999 "qla_target(%d): LINK REINIT (loop %#x, "
5000 "subcode %x)\n", vha->vp_idx,
5001 le16_to_cpu(iocb->u.isp24.nport_handle),
5002 iocb->u.isp24.status_subcode);
5003 if (tgt->link_reinit_iocb_pending) {
5004 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
5007 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5008 tgt->link_reinit_iocb_pending = 1;
5010 * QLogic requires to wait after LINK REINIT for possible
5011 * PDISC or ADISC ELS commands
5013 send_notify_ack = 0;
5017 case IMM_NTFY_PORT_LOGOUT:
5018 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5019 "qla_target(%d): Port logout (loop "
5020 "%#x, subcode %x)\n", vha->vp_idx,
5021 le16_to_cpu(iocb->u.isp24.nport_handle),
5022 iocb->u.isp24.status_subcode);
5024 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5025 send_notify_ack = 0;
5026 /* The sessions will be cleared in the callback, if needed */
5029 case IMM_NTFY_GLBL_TPRLO:
5030 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5031 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5032 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5033 send_notify_ack = 0;
5034 /* The sessions will be cleared in the callback, if needed */
5037 case IMM_NTFY_PORT_CONFIG:
5038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5039 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5041 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5042 send_notify_ack = 0;
5043 /* The sessions will be cleared in the callback, if needed */
5046 case IMM_NTFY_GLBL_LOGO:
5047 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5048 "qla_target(%d): Link failure detected\n",
5050 /* I_T nexus loss */
5051 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5052 send_notify_ack = 0;
5055 case IMM_NTFY_IOCB_OVERFLOW:
5056 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5057 "qla_target(%d): Cannot provide requested "
5058 "capability (IOCB overflowed the immediate notify "
5059 "resource count)\n", vha->vp_idx);
5062 case IMM_NTFY_ABORT_TASK:
5063 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5064 "qla_target(%d): Abort Task (S %08x I %#x -> "
5065 "L %#x)\n", vha->vp_idx,
5066 le16_to_cpu(iocb->u.isp2x.seq_id),
5067 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5068 le16_to_cpu(iocb->u.isp2x.lun));
5069 if (qlt_abort_task(vha, iocb) == 0)
5070 send_notify_ack = 0;
5073 case IMM_NTFY_RESOURCE:
5074 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5075 "qla_target(%d): Out of resources, host %ld\n",
5076 vha->vp_idx, vha->host_no);
5079 case IMM_NTFY_MSG_RX:
5080 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5081 "qla_target(%d): Immediate notify task %x\n",
5082 vha->vp_idx, iocb->u.isp2x.task_flags);
5083 if (qlt_handle_task_mgmt(vha, iocb) == 0)
5084 send_notify_ack = 0;
5088 if (qlt_24xx_handle_els(vha, iocb) == 0)
5089 send_notify_ack = 0;
5093 qlt_prepare_srr_imm(vha, iocb);
5094 send_notify_ack = 0;
5098 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5099 "qla_target(%d): Received unknown immediate "
5100 "notify status %x\n", vha->vp_idx, status);
5104 if (send_notify_ack)
5105 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
5109 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5110 * This function sends busy to ISP 2xxx or 24xx.
5112 static int __qlt_send_busy(struct scsi_qla_host *vha,
5113 struct atio_from_isp *atio, uint16_t status)
5115 struct ctio7_to_24xx *ctio24;
5116 struct qla_hw_data *ha = vha->hw;
5118 struct qla_tgt_sess *sess = NULL;
5119 unsigned long flags;
5121 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5122 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5123 atio->u.isp24.fcp_hdr.s_id);
5124 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5126 qlt_send_term_exchange(vha, NULL, atio, 1);
5129 /* Sending marker isn't necessary, since we're called from an ISR */
5131 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
5133 ql_dbg(ql_dbg_io, vha, 0x3063,
5134 "qla_target(%d): %s failed: unable to allocate "
5135 "request packet", vha->vp_idx, __func__);
5139 vha->tgt_counters.num_q_full_sent++;
5140 pkt->entry_count = 1;
5141 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
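/*
 * Build a status-only (mode 1) CTIO that returns 'status' for this
 * exchange without a backing qla_tgt_cmd; QLA_TGT_SKIP_HANDLE tells the
 * CTIO completion path not to look up a command for it.
 */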
5143 ctio24 = (struct ctio7_to_24xx *)pkt;
5144 ctio24->entry_type = CTIO_TYPE7;
5145 ctio24->nport_handle = sess->loop_id;
5146 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5147 ctio24->vp_index = vha->vp_idx;
5148 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
5149 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
5150 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
5151 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5152 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
5154 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5155 CTIO7_FLAGS_DONT_RET_CTIO);
5157 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5158 * if the explicit confirmation is used.
5160 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
5161 ctio24->u.status1.scsi_status = cpu_to_le16(status);
5162 /* Memory Barrier */
5164 qla2x00_start_iocbs(vha, vha->req);
5169 * This routine is used to allocate a command for either a QFull condition
5170 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5174 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5175 struct atio_from_isp *atio, uint16_t status, int qfull)
5177 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5178 struct qla_hw_data *ha = vha->hw;
5179 struct qla_tgt_sess *sess;
5180 struct se_session *se_sess;
5181 struct qla_tgt_cmd *cmd;
5184 if (unlikely(tgt->tgt_stop)) {
5185 ql_dbg(ql_dbg_io, vha, 0x300a,
5186 "New command while device %p is shutting down\n", tgt);
5190 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5191 vha->hw->tgt.num_qfull_cmds_dropped++;
5192 if (vha->hw->tgt.num_qfull_cmds_dropped >
5193 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
5194 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
5195 vha->hw->tgt.num_qfull_cmds_dropped;
5197 ql_dbg(ql_dbg_io, vha, 0x3068,
5198 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5199 vha->vp_idx, __func__,
5200 vha->hw->tgt.num_qfull_cmds_dropped);
5202 qlt_chk_exch_leak_thresh_hold(vha);
5206 sess = ha->tgt.tgt_ops->find_sess_by_s_id
5207 (vha, atio->u.isp24.fcp_hdr.s_id);
5211 se_sess = sess->se_sess;
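/*
 * Borrow a pre-allocated command descriptor from the session's tag
 * pool; it only needs to carry the ATIO and the status/term flags so
 * qlt_free_qfull_cmds() can answer the initiator later.
 */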
5213 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
5217 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
5219 ql_dbg(ql_dbg_io, vha, 0x3009,
5220 "qla_target(%d): %s: Allocation of cmd failed\n",
5221 vha->vp_idx, __func__);
5223 vha->hw->tgt.num_qfull_cmds_dropped++;
5224 if (vha->hw->tgt.num_qfull_cmds_dropped >
5225 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
5226 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
5227 vha->hw->tgt.num_qfull_cmds_dropped;
5229 qlt_chk_exch_leak_thresh_hold(vha);
5233 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
5235 qlt_incr_num_pend_cmds(vha);
5236 INIT_LIST_HEAD(&cmd->cmd_list);
5237 memcpy(&cmd->atio, atio, sizeof(*atio));
5239 cmd->tgt = vha->vha_tgt.qla_tgt;
5241 cmd->reset_count = vha->hw->chip_reset;
5246 /* NOTE: borrowing the state field to carry the status */
5247 cmd->state = status;
5249 cmd->term_exchg = 1;
5251 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5253 vha->hw->tgt.num_qfull_cmds_alloc++;
5254 if (vha->hw->tgt.num_qfull_cmds_alloc >
5255 vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
5256 vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
5257 vha->hw->tgt.num_qfull_cmds_alloc;
5261 qlt_free_qfull_cmds(struct scsi_qla_host *vha)
5263 struct qla_hw_data *ha = vha->hw;
5264 unsigned long flags;
5265 struct qla_tgt_cmd *cmd, *tcmd;
5266 struct list_head free_list;
5269 if (list_empty(&ha->tgt.q_full_list))
5272 INIT_LIST_HEAD(&free_list);
5274 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
5276 if (list_empty(&ha->tgt.q_full_list)) {
5277 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
5281 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
5283 /* cmd->state is a borrowed field to hold status */
5284 rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
5285 else if (cmd->term_exchg)
5286 rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
5292 ql_dbg(ql_dbg_io, vha, 0x3006,
5293 "%s: busy sent for ox_id[%04x]\n", __func__,
5294 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5295 else if (cmd->term_exchg)
5296 ql_dbg(ql_dbg_io, vha, 0x3007,
5297 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5298 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5300 ql_dbg(ql_dbg_io, vha, 0x3008,
5301 "%s: Unexpected cmd in QFull list %p\n", __func__,
5304 list_del(&cmd->cmd_list);
5305 list_add_tail(&cmd->cmd_list, &free_list);
5307 /* piggy back on hardware_lock for protection */
5308 vha->hw->tgt.num_qfull_cmds_alloc--;
5310 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
5314 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5315 list_del(&cmd->cmd_list);
5316 /* This cmd was never sent to TCM. There is no need
5317 * to schedule free or call free_cmd
5325 qlt_send_busy(struct scsi_qla_host *vha,
5326 struct atio_from_isp *atio, uint16_t status)
5330 rc = __qlt_send_busy(vha, atio, status);
5332 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5336 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
5337 struct atio_from_isp *atio)
5339 struct qla_hw_data *ha = vha->hw;
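/*
 * Once the number of pending commands crosses the queue-full threshold,
 * answer new ATIOs with SAM BUSY instead of forwarding them to the core.
 */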
5342 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5345 status = temp_sam_status;
5346 qlt_send_busy(vha, atio, status);
5350 /* ha->hardware_lock supposed to be held on entry */
5351 /* called via callback from qla2xxx */
5352 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5353 struct atio_from_isp *atio, uint8_t ha_locked)
5355 struct qla_hw_data *ha = vha->hw;
5356 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5358 unsigned long flags;
5360 if (unlikely(tgt == NULL)) {
5361 ql_dbg(ql_dbg_io, vha, 0x3064,
5362 "ATIO pkt, but no tgt (ha %p)", ha);
5366 * In tgt_stop mode we should also allow all requests to pass.
5367 * Otherwise, some commands can get stuck.
5370 tgt->atio_irq_cmd_count++;
5372 switch (atio->u.raw.entry_type) {
5374 if (unlikely(atio->u.isp24.exchange_addr ==
5375 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
5376 ql_dbg(ql_dbg_io, vha, 0x3065,
5377 "qla_target(%d): ATIO_TYPE7 "
5378 "received with UNKNOWN exchange address, "
5379 "sending QUEUE_FULL\n", vha->vp_idx);
5381 spin_lock_irqsave(&ha->hardware_lock, flags);
5382 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
5384 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5390 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5391 rc = qlt_chk_qfull_thresh_hold(vha, atio);
5393 tgt->atio_irq_cmd_count--;
5396 rc = qlt_handle_cmd_for_atio(vha, atio);
5398 rc = qlt_handle_task_mgmt(vha, atio);
5400 if (unlikely(rc != 0)) {
5404 (&ha->hardware_lock, flags);
5406 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5407 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5409 qlt_send_term_exchange(vha, NULL, atio, 1);
5413 spin_unlock_irqrestore
5414 (&ha->hardware_lock, flags);
5417 if (tgt->tgt_stop) {
5418 ql_dbg(ql_dbg_tgt, vha, 0xe059,
5419 "qla_target: Unable to send "
5420 "command to target for req, "
5423 ql_dbg(ql_dbg_tgt, vha, 0xe05a,
5424 "qla_target(%d): Unable to send "
5425 "command to target, sending BUSY "
5426 "status.\n", vha->vp_idx);
5429 &ha->hardware_lock, flags);
5430 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5432 spin_unlock_irqrestore(
5433 &ha->hardware_lock, flags);
5439 case IMMED_NOTIFY_TYPE:
5441 if (unlikely(atio->u.isp2x.entry_status != 0)) {
5442 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5443 "qla_target(%d): Received ATIO packet %x "
5444 "with error status %x\n", vha->vp_idx,
5445 atio->u.raw.entry_type,
5446 atio->u.isp2x.entry_status);
5449 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5452 spin_lock_irqsave(&ha->hardware_lock, flags);
5453 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5455 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5460 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5461 "qla_target(%d): Received unknown ATIO atio "
5462 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5466 tgt->atio_irq_cmd_count--;
5469 /* ha->hardware_lock supposed to be held on entry */
5470 /* called via callback from qla2xxx */
5471 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5473 struct qla_hw_data *ha = vha->hw;
5474 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5476 if (unlikely(tgt == NULL)) {
5477 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5478 "qla_target(%d): Response pkt %x received, but no "
5479 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
5484 * In tgt_stop mode we should also allow all requests to pass.
5485 * Otherwise, some commands can get stuck.
5488 tgt->irq_cmd_count++;
5490 switch (pkt->entry_type) {
5494 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5495 qlt_do_ctio_completion(vha, entry->handle,
5496 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5501 case ACCEPT_TGT_IO_TYPE:
5503 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5505 if (atio->u.isp2x.status !=
5506 cpu_to_le16(ATIO_CDB_VALID)) {
5507 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5508 "qla_target(%d): ATIO with error "
5509 "status %x received\n", vha->vp_idx,
5510 le16_to_cpu(atio->u.isp2x.status));
5514 rc = qlt_chk_qfull_thresh_hold(vha, atio);
5516 tgt->irq_cmd_count--;
5520 rc = qlt_handle_cmd_for_atio(vha, atio);
5521 if (unlikely(rc != 0)) {
5523 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5524 qlt_send_busy(vha, atio, 0);
5526 qlt_send_term_exchange(vha, NULL, atio, 1);
5529 if (tgt->tgt_stop) {
5530 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5531 "qla_target: Unable to send "
5532 "command to target, sending TERM "
5533 "EXCHANGE for rsp\n");
5534 qlt_send_term_exchange(vha, NULL,
5537 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5538 "qla_target(%d): Unable to send "
5539 "command to target, sending BUSY "
5540 "status\n", vha->vp_idx);
5541 qlt_send_busy(vha, atio, 0);
5548 case CONTINUE_TGT_IO_TYPE:
5550 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5551 qlt_do_ctio_completion(vha, entry->handle,
5552 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5559 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5560 qlt_do_ctio_completion(vha, entry->handle,
5561 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5566 case IMMED_NOTIFY_TYPE:
5567 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5568 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5571 case NOTIFY_ACK_TYPE:
5572 if (tgt->notify_ack_expected > 0) {
5573 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5574 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5575 "NOTIFY_ACK seq %08x status %x\n",
5576 le16_to_cpu(entry->u.isp2x.seq_id),
5577 le16_to_cpu(entry->u.isp2x.status));
5578 tgt->notify_ack_expected--;
5579 if (entry->u.isp2x.status !=
5580 cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5581 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5582 "qla_target(%d): NOTIFY_ACK "
5583 "failed %x\n", vha->vp_idx,
5584 le16_to_cpu(entry->u.isp2x.status));
5587 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5588 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5593 case ABTS_RECV_24XX:
5594 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5595 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5596 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5599 case ABTS_RESP_24XX:
5600 if (tgt->abts_resp_expected > 0) {
5601 struct abts_resp_from_24xx_fw *entry =
5602 (struct abts_resp_from_24xx_fw *)pkt;
5603 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5604 "ABTS_RESP_24XX: compl_status %x\n",
5605 entry->compl_status);
5606 tgt->abts_resp_expected--;
5607 if (le16_to_cpu(entry->compl_status) !=
5608 ABTS_RESP_COMPL_SUCCESS) {
5609 if ((entry->error_subcode1 == 0x1E) &&
5610 (entry->error_subcode2 == 0)) {
5612 * We've got a race here: aborted
5613 * exchange not terminated, i.e.
5614 * response for the aborted command was
5615 * sent after the abort request was
5616 * received but before it was processed.
5617 * Unfortunately, the firmware has a
5618 * silly requirement that all aborted
5619 * exchanges must be explicitly
5620 * terminated, otherwise it refuses to
5621 * send responses for the abort
5622 * requests. So, we have to
5623 * (re)terminate the exchange and retry
5624 * the abort response.
5626 qlt_24xx_retry_term_exchange(vha,
5629 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5630 "qla_target(%d): ABTS_RESP_24XX "
5631 "failed %x (subcode %x:%x)",
5632 vha->vp_idx, entry->compl_status,
5633 entry->error_subcode1,
5634 entry->error_subcode2);
5637 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5638 "qla_target(%d): Unexpected ABTS_RESP_24XX "
5639 "received\n", vha->vp_idx);
5644 ql_dbg(ql_dbg_tgt, vha, 0xe065,
5645 "qla_target(%d): Received unknown response pkt "
5646 "type %x\n", vha->vp_idx, pkt->entry_type);
5650 tgt->irq_cmd_count--;
5654 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5656 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5659 struct qla_hw_data *ha = vha->hw;
5660 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5663 if (!ha->tgt.tgt_ops)
5666 if (unlikely(tgt == NULL)) {
5667 ql_dbg(ql_dbg_tgt, vha, 0xe03a,
5668 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
5672 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5676 * In tgt_stop mode we should also allow all requests to pass.
5677 * Otherwise, some commands can get stuck.
5680 tgt->irq_cmd_count++;
5683 case MBA_RESET: /* Reset */
5684 case MBA_SYSTEM_ERR: /* System Error */
5685 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
5686 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
5687 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5688 "qla_target(%d): System error async event %#x "
5689 "occurred", vha->vp_idx, code);
5691 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
5692 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5697 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5698 "qla_target(%d): Async LOOP_UP occurred "
5699 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5700 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5701 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5702 if (tgt->link_reinit_iocb_pending) {
5703 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
5705 tgt->link_reinit_iocb_pending = 0;
5710 case MBA_LIP_OCCURRED:
5713 case MBA_RSCN_UPDATE:
5714 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5715 "qla_target(%d): Async event %#x occurred "
5716 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5717 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5718 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5721 case MBA_PORT_UPDATE:
5722 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5723 "qla_target(%d): Port update async event %#x "
5724 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5725 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5726 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5727 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5729 login_code = le16_to_cpu(mailbox[2]);
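/* mailbox[2] carries the login state for the updated port; the values
 * checked below are 0x4 (PLOGI complete) and 0x7 (port logged out).
 */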
5730 if (login_code == 0x4)
5731 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5732 "Async MB 2: Got PLOGI Complete\n");
5733 else if (login_code == 0x7)
5734 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
5735 "Async MB 2: Port Logged Out\n");
5742 tgt->irq_cmd_count--;
5745 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5751 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
5753 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
5754 "qla_target(%d): Allocation of tmp FC port failed",
5759 fcport->loop_id = loop_id;
5761 rc = qla2x00_get_port_database(vha, fcport, 0);
5762 if (rc != QLA_SUCCESS) {
5763 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
5764 "qla_target(%d): Failed to retrieve fcport "
5765 "information -- get_port_database() returned %x "
5766 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
5774 /* Must be called under tgt_mutex */
5775 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
5778 struct qla_tgt_sess *sess = NULL;
5779 fc_port_t *fcport = NULL;
5780 int rc, global_resets;
5781 uint16_t loop_id = 0;
5783 mutex_lock(&vha->vha_tgt.tgt_mutex);
5787 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
5789 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
5791 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5793 if ((s_id[0] == 0xFF) &&
5794 (s_id[1] == 0xFC)) {
5796 * This is the Domain Controller, so it should be
5797 * OK to drop SCSI commands from it.
5799 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
5800 "Unable to find initiator with S_ID %x:%x:%x",
5801 s_id[0], s_id[1], s_id[2]);
5803 ql_log(ql_log_info, vha, 0xf071,
5804 "qla_target(%d): Unable to find "
5805 "initiator with S_ID %x:%x:%x",
5806 vha->vp_idx, s_id[0], s_id[1],
5809 if (rc == -ENOENT) {
5810 qlt_port_logo_t logo;
5811 sid_to_portid(s_id, &logo.id);
5813 qlt_send_first_logo(vha, &logo);
5819 fcport = qlt_get_port_database(vha, loop_id);
5821 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5825 if (global_resets !=
5826 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
5827 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
5828 "qla_target(%d): global reset during session discovery "
5829 "(counter was %d, new %d), retrying", vha->vp_idx,
5831 atomic_read(&vha->vha_tgt.
5832 qla_tgt->tgt_global_resets_count));
5836 sess = qlt_create_sess(vha, fcport, true);
5838 mutex_unlock(&vha->vha_tgt.tgt_mutex);
5844 static void qlt_abort_work(struct qla_tgt *tgt,
5845 struct qla_tgt_sess_work_param *prm)
5847 struct scsi_qla_host *vha = tgt->vha;
5848 struct qla_hw_data *ha = vha->hw;
5849 struct qla_tgt_sess *sess = NULL;
5850 unsigned long flags = 0, flags2 = 0;
5855 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5860 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
5861 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
5862 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
5864 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5865 (unsigned char *)&be_s_id);
5867 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
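/*
 * No session exists yet for this S_ID; build one from the firmware
 * port database before handling the ABTS.
 */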
5869 sess = qlt_make_local_sess(vha, s_id);
5870 /* sess has got an extra creation ref */
5872 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5876 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5881 kref_get(&sess->se_sess->sess_kref);
5884 spin_lock_irqsave(&ha->hardware_lock, flags);
5889 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5892 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5894 ha->tgt.tgt_ops->put_sess(sess);
5895 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5899 spin_lock_irqsave(&ha->hardware_lock, flags);
5902 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5903 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5906 ha->tgt.tgt_ops->put_sess(sess);
5907 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5910 static void qlt_tmr_work(struct qla_tgt *tgt,
5911 struct qla_tgt_sess_work_param *prm)
5913 struct atio_from_isp *a = &prm->tm_iocb2;
5914 struct scsi_qla_host *vha = tgt->vha;
5915 struct qla_hw_data *ha = vha->hw;
5916 struct qla_tgt_sess *sess = NULL;
5917 unsigned long flags;
5918 uint8_t *s_id = NULL; /* to hide compiler warnings */
5920 uint32_t lun, unpacked_lun;
5924 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5929 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
5930 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5932 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5934 sess = qlt_make_local_sess(vha, s_id);
5935 /* sess has got an extra creation ref */
5937 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5941 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5946 kref_get(&sess->se_sess->sess_kref);
5950 lun = a->u.isp24.fcp_cmnd.lun;
5951 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
5952 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
5954 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
5958 ha->tgt.tgt_ops->put_sess(sess);
5959 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5963 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0);
5965 ha->tgt.tgt_ops->put_sess(sess);
5966 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5969 static void qlt_sess_work_fn(struct work_struct *work)
5971 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
5972 struct scsi_qla_host *vha = tgt->vha;
5973 unsigned long flags;
5975 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
5977 spin_lock_irqsave(&tgt->sess_work_lock, flags);
5978 while (!list_empty(&tgt->sess_works_list)) {
5979 struct qla_tgt_sess_work_param *prm = list_entry(
5980 tgt->sess_works_list.next, typeof(*prm),
5981 sess_works_list_entry);
5984 * This work can be scheduled on several CPUs at a time, so we
5985 * must delete the entry to avoid double processing
5987 list_del(&prm->sess_works_list_entry);
5989 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
5991 switch (prm->type) {
5992 case QLA_TGT_SESS_WORK_ABORT:
5993 qlt_abort_work(tgt, prm);
5995 case QLA_TGT_SESS_WORK_TM:
5996 qlt_tmr_work(tgt, prm);
6003 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6007 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6010 /* Must be called under tgt_host_action_mutex */
6011 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6013 struct qla_tgt *tgt;
6015 if (!QLA_TGT_MODE_ENABLED())
6018 if (!IS_TGT_MODE_CAPABLE(ha)) {
6019 ql_log(ql_log_warn, base_vha, 0xe070,
6020 "This adapter does not support target mode.\n");
6024 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6025 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6027 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6029 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6031 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6032 "Unable to allocate struct qla_tgt\n");
6036 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6037 base_vha->host->hostt->supported_mode |= MODE_TARGET;
6040 tgt->vha = base_vha;
6041 init_waitqueue_head(&tgt->waitQ);
6042 INIT_LIST_HEAD(&tgt->sess_list);
6043 INIT_LIST_HEAD(&tgt->del_sess_list);
6044 INIT_DELAYED_WORK(&tgt->sess_del_work,
6045 (void (*)(struct work_struct *))qlt_del_sess_work_fn);
6046 spin_lock_init(&tgt->sess_work_lock);
6047 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6048 INIT_LIST_HEAD(&tgt->sess_works_list);
6049 spin_lock_init(&tgt->srr_lock);
6050 INIT_LIST_HEAD(&tgt->srr_ctio_list);
6051 INIT_LIST_HEAD(&tgt->srr_imm_list);
6052 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
6053 atomic_set(&tgt->tgt_global_resets_count, 0);
6055 base_vha->vha_tgt.qla_tgt = tgt;
6057 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6058 "qla_target(%d): using 64 Bit PCI addressing",
6060 tgt->tgt_enable_64bit_addr = 1;
6062 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6063 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
6064 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
6066 if (base_vha->fc_vport)
6069 mutex_lock(&qla_tgt_mutex);
6070 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6071 mutex_unlock(&qla_tgt_mutex);
6076 /* Must be called under tgt_host_action_mutex */
6077 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6079 if (!vha->vha_tgt.qla_tgt)
6082 if (vha->fc_vport) {
6083 qlt_release(vha->vha_tgt.qla_tgt);
6087 /* free left over qfull cmds */
6088 qlt_init_term_exchange(vha);
6090 mutex_lock(&qla_tgt_mutex);
6091 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
6092 mutex_unlock(&qla_tgt_mutex);
6094 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6096 qlt_release(vha->vha_tgt.qla_tgt);
6101 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6106 pr_debug("qla2xxx HW vha->node_name: ");
6107 for (i = 0; i < WWN_SIZE; i++)
6108 pr_debug("%02x ", vha->node_name[i]);
6110 pr_debug("qla2xxx HW vha->port_name: ");
6111 for (i = 0; i < WWN_SIZE; i++)
6112 pr_debug("%02x ", vha->port_name[i]);
6115 pr_debug("qla2xxx passed configfs WWPN: ");
6116 put_unaligned_be64(wwpn, b);
6117 for (i = 0; i < WWN_SIZE; i++)
6118 pr_debug("%02x ", b[i]);
6123 * qla_tgt_lport_register - register lport with external module
6125 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
6126 * @wwpn: Passed FC target WWPN
6127 * @callback: lport initialization callback for tcm_qla2xxx code
6128 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6130 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6131 u64 npiv_wwpn, u64 npiv_wwnn,
6132 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6134 struct qla_tgt *tgt;
6135 struct scsi_qla_host *vha;
6136 struct qla_hw_data *ha;
6137 struct Scsi_Host *host;
6138 unsigned long flags;
6142 mutex_lock(&qla_tgt_mutex);
6143 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6151 if (!(host->hostt->supported_mode & MODE_TARGET))
6154 spin_lock_irqsave(&ha->hardware_lock, flags);
6155 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6156 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6158 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6161 if (tgt->tgt_stop) {
6162 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6164 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6167 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6169 if (!scsi_host_get(host)) {
6170 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6171 "Unable to scsi_host_get() for"
6172 " qla2xxx scsi_host\n");
6175 qlt_lport_dump(vha, phys_wwpn, b);
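/*
 * Bind only to the HBA whose physical port name matches the WWPN that
 * configfs handed us; skip every other registered target.
 */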
6177 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6178 scsi_host_put(host);
6181 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6183 scsi_host_put(host);
6185 mutex_unlock(&qla_tgt_mutex);
6188 mutex_unlock(&qla_tgt_mutex);
6192 EXPORT_SYMBOL(qlt_lport_register);
6195 * qla_tgt_lport_deregister - Deregister lport
6197 * @vha: Registered scsi_qla_host pointer
6199 void qlt_lport_deregister(struct scsi_qla_host *vha)
6201 struct qla_hw_data *ha = vha->hw;
6202 struct Scsi_Host *sh = vha->host;
6204 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6206 vha->vha_tgt.target_lport_ptr = NULL;
6207 ha->tgt.tgt_ops = NULL;
6209 * Release the Scsi_Host reference for the underlying qla2xxx host
6213 EXPORT_SYMBOL(qlt_lport_deregister);
6215 /* Must be called under HW lock */
6216 static void qlt_set_mode(struct scsi_qla_host *vha)
6218 struct qla_hw_data *ha = vha->hw;
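/*
 * In "disabled" and "exclusive" initiator modes the port becomes a pure
 * target once target mode comes up; in "enabled" mode MODE_TARGET is
 * added alongside the existing initiator role.
 */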
6220 switch (ql2x_ini_mode) {
6221 case QLA2XXX_INI_MODE_DISABLED:
6222 case QLA2XXX_INI_MODE_EXCLUSIVE:
6223 vha->host->active_mode = MODE_TARGET;
6225 case QLA2XXX_INI_MODE_ENABLED:
6226 vha->host->active_mode |= MODE_TARGET;
6232 if (ha->tgt.ini_mode_force_reverse)
6233 qla_reverse_ini_mode(vha);
6236 /* Must be called under HW lock */
6237 static void qlt_clear_mode(struct scsi_qla_host *vha)
6239 struct qla_hw_data *ha = vha->hw;
6241 switch (ql2x_ini_mode) {
6242 case QLA2XXX_INI_MODE_DISABLED:
6243 vha->host->active_mode = MODE_UNKNOWN;
6245 case QLA2XXX_INI_MODE_EXCLUSIVE:
6246 vha->host->active_mode = MODE_INITIATOR;
6248 case QLA2XXX_INI_MODE_ENABLED:
6249 vha->host->active_mode &= ~MODE_TARGET;
6255 if (ha->tgt.ini_mode_force_reverse)
6256 qla_reverse_ini_mode(vha);
6260 * qla_tgt_enable_vha - NO LOCK HELD
6262 * host_reset, bring up w/ Target Mode Enabled
6265 qlt_enable_vha(struct scsi_qla_host *vha)
6267 struct qla_hw_data *ha = vha->hw;
6268 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6269 unsigned long flags;
6270 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6271 int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
6274 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6275 "Unable to locate qla_tgt pointer from"
6276 " struct qla_hw_data\n");
6281 spin_lock_irqsave(&ha->hardware_lock, flags);
6282 tgt->tgt_stopped = 0;
6284 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6287 qla24xx_disable_vp(vha);
6288 qla24xx_enable_vp(vha);
6290 if (ha->msix_entries) {
6291 ql_dbg(ql_dbg_tgt, vha, 0xffff,
6292 "%s: host%ld : vector %d cpu %d\n",
6293 __func__, vha->host_no,
6294 ha->msix_entries[rspq_ent].vector,
6295 ha->msix_entries[rspq_ent].cpuid);
6297 ha->tgt.rspq_vector_cpuid =
6298 ha->msix_entries[rspq_ent].cpuid;
6301 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6302 qla2xxx_wake_dpc(base_vha);
6303 qla2x00_wait_for_hba_online(base_vha);
6306 EXPORT_SYMBOL(qlt_enable_vha);
6309 * qla_tgt_disable_vha - NO LOCK HELD
6311 * Disable Target Mode and reset the adapter
6313 static void qlt_disable_vha(struct scsi_qla_host *vha)
6315 struct qla_hw_data *ha = vha->hw;
6316 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6317 unsigned long flags;
6320 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6321 "Unable to locate qla_tgt pointer from"
6322 " struct qla_hw_data\n");
6327 spin_lock_irqsave(&ha->hardware_lock, flags);
6328 qlt_clear_mode(vha);
6329 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6331 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6332 qla2xxx_wake_dpc(vha);
6333 qla2x00_wait_for_hba_online(vha);
6337 * Called from qla_init.c:qla24xx_vport_create() context to set up
6338 * the target mode specific struct scsi_qla_host and struct qla_hw_data
6342 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6344 if (!qla_tgt_mode_enabled(vha))
6347 vha->vha_tgt.qla_tgt = NULL;
6349 mutex_init(&vha->vha_tgt.tgt_mutex);
6350 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6352 qlt_clear_mode(vha);
6355 * NOTE: Currently the value is kept the same for <24xx and
6356 * >=24xx ISPs. If it is necessary to change it,
6357 * the check should be added for specific ISPs,
6358 * assigning the value appropriately.
6360 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6362 qlt_add_target(ha, vha);
6366 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
6369 * FC-4 Feature bit 0 indicates target functionality to the name server.
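* Bit 1 indicates initiator functionality; both bits are set when the
* port runs in dual (target + initiator) mode.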
6371 if (qla_tgt_mode_enabled(vha)) {
6372 if (qla_ini_mode_enabled(vha))
6373 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
6375 ct_req->req.rff_id.fc4_feature = BIT_0;
6376 } else if (qla_ini_mode_enabled(vha)) {
6377 ct_req->req.rff_id.fc4_feature = BIT_1;
6382 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6385 * Beginning of ATIO ring has initialization control block already built
6386 * by nvram config routine.
6388 * Returns 0 on success.
6391 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6393 struct qla_hw_data *ha = vha->hw;
6395 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6397 if (!qla_tgt_mode_enabled(vha))
6400 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6401 pkt->u.raw.signature = ATIO_PROCESSED;
6408 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6409 * @ha: SCSI driver HA context
6412 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6414 struct qla_hw_data *ha = vha->hw;
6415 struct atio_from_isp *pkt;
6418 if (!vha->flags.online)
6421 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
6422 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6423 cnt = pkt->u.raw.entry_count;
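/*
 * Hand the packet to every vport that wants it, then advance the ring
 * index past all entries it spans (wrapping at the end of the ring) and
 * mark each consumed entry ATIO_PROCESSED so it is never replayed.
 */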
6425 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
6428 for (i = 0; i < cnt; i++) {
6429 ha->tgt.atio_ring_index++;
6430 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6431 ha->tgt.atio_ring_index = 0;
6432 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6434 ha->tgt.atio_ring_ptr++;
6436 pkt->u.raw.signature = ATIO_PROCESSED;
6437 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6442 /* Adjust ring index */
6443 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6444 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
6448 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6450 struct qla_hw_data *ha = vha->hw;
6451 if (!QLA_TGT_MODE_ENABLED())
6454 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
6455 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
6456 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
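/*
 * On ISPs with a dedicated ATIO MSI-X vector, record that vector in the
 * init control block so the firmware steers ATIO-queue interrupts to it.
 */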
6458 if (IS_ATIO_MSIX_CAPABLE(ha)) {
6459 struct qla_msix_entry *msix = &ha->msix_entries[2];
6460 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6462 icb->msix_atio = cpu_to_le16(msix->entry);
6463 ql_dbg(ql_dbg_init, vha, 0xf072,
6464 "Registering ICB vector 0x%x for atio que.\n",
6470 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6472 struct qla_hw_data *ha = vha->hw;
6474 if (qla_tgt_mode_enabled(vha)) {
6475 if (!ha->tgt.saved_set) {
6476 /* We save only once */
6477 ha->tgt.saved_exchange_count = nv->exchange_count;
6478 ha->tgt.saved_firmware_options_1 =
6479 nv->firmware_options_1;
6480 ha->tgt.saved_firmware_options_2 =
6481 nv->firmware_options_2;
6482 ha->tgt.saved_firmware_options_3 =
6483 nv->firmware_options_3;
6484 ha->tgt.saved_set = 1;
6487 nv->exchange_count = cpu_to_le16(0xFFFF);
6489 /* Enable target mode */
6490 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6492 /* Disable ini mode, if requested */
6493 if (!qla_ini_mode_enabled(vha))
6494 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6496 /* Disable Full Login after LIP */
6497 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6498 /* Enable initial LIP */
6499 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6500 if (ql2xtgt_tape_enable)
6501 /* Enable FC Tape support */
6502 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6504 /* Disable FC Tape support */
6505 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6507 /* Disable Full Login after LIP */
6508 nv->host_p &= cpu_to_le32(~BIT_10);
6509 /* Enable target PRLI control */
6510 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6512 if (ha->tgt.saved_set) {
6513 nv->exchange_count = ha->tgt.saved_exchange_count;
6514 nv->firmware_options_1 =
6515 ha->tgt.saved_firmware_options_1;
6516 nv->firmware_options_2 =
6517 ha->tgt.saved_firmware_options_2;
6518 nv->firmware_options_3 =
6519 ha->tgt.saved_firmware_options_3;
6524 /* out-of-order frames reassembly */
6525 nv->firmware_options_3 |= BIT_6|BIT_9;
6527 if (ha->tgt.enable_class_2) {
6528 if (vha->flags.init_done)
6529 fc_host_supported_classes(vha->host) =
6530 FC_COS_CLASS2 | FC_COS_CLASS3;
6532 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6534 if (vha->flags.init_done)
6535 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6537 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6542 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6543 struct init_cb_24xx *icb)
6545 struct qla_hw_data *ha = vha->hw;
6547 if (!QLA_TGT_MODE_ENABLED())
6550 if (ha->tgt.node_name_set) {
6551 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6552 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6555 /* disable ZIO at start time. */
6556 if (!vha->flags.init_done) {
6558 tmp = le32_to_cpu(icb->firmware_options_2);
6559 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6560 icb->firmware_options_2 = cpu_to_le32(tmp);
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}

	/* disable ZIO at start time. */
	if (!vha->flags.init_done) {
		uint32_t tmp;
		tmp = le32_to_cpu(icb->firmware_options_2);
		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->firmware_options_2 = cpu_to_le32(tmp);
	}
}

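/*
 * Reserve one extra MSI-X vector on ISP83xx so the ATIO queue can get a
 * dedicated interrupt, serviced by qla83xx_msix_atio_q() below.
 */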
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

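/*
 * Classify error status entries from the response queue: returns 1 for
 * entry types owned by target mode (ABTS, CTIO, notify ack), 0 otherwise
 * so the caller can fall back to the initiator-mode error path.
 */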
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
	case CTIO_CRC2:
		return 1;
	default:
		return 0;
	}
}

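/*
 * Mirror the target/initiator mode selection into a VP config IOCB:
 * BIT_5 of options_idx1 is cleared when target mode is enabled, BIT_4
 * when initiator mode is to be disabled.
 */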
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

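/*
 * Early probe-time setup: point the ATIO queue in/out pointers at the
 * proper register block (multiqueue vs. legacy), initialize the target
 * mode mutexes and start out with target mode cleared.
 */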
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

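/*
 * MSI-X handler for the dedicated ATIO queue vector on ISP83xx; drains
 * the ATIO ring under atio_lock.
 */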
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}

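/*
 * ABTS frames received on the response queue are deferred to qla_tgt_wq
 * and replayed from process context: the work handler below drains the
 * ATIO queue first, then hands the saved packet back to
 * qlt_response_pkt_all_vps() under the hardware lock.
 */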
static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
	    struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
		return;

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

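/*
 * Queue an ABTS packet for the deferred handling above.  If no memory is
 * available, the packet is processed immediately as a best-effort
 * fallback.
 */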
static void
qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);
	if (!op) {
		/* do not reach for ATIO queue here. This is best effort err
		 * recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->chip_reset;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
}

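/*
 * Allocate the per-HBA target-mode resources: the vport map used by
 * qlt_update_vp_map() and the DMA-coherent ATIO ring.
 */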
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

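/*
 * Translate the qlini_mode module parameter string into ql2x_ini_mode,
 * e.g. "modprobe qla2xxx qlini_mode=disabled" for a pure target-mode
 * setup.  Returns false for an unrecognized value.
 */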
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}

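/*
 * Module-load initialization of the target-mode infrastructure: the
 * management command slab cache and mempool, the PLOGI ack cache and the
 * qla_tgt_wq workqueue.  Returns 1 when initiator mode is being disabled,
 * 0 on plain success and a negative errno on failure.
 */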
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(qlt_plogi_ack_t), __alignof__(qlt_plogi_ack_t),
	    0, NULL);
	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

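/*
 * Tear down everything qlt_init() created, in reverse order.
 */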
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}