else
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
- atomic_dec(&ndlp->cmd_pending);
-
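The completion handler thus stops paying for an atomic decrement on the shared node counter for every I/O. The decrement moves into the NVMe buffer release path further down, paired with a new LPFC_BUMP_QDEPTH flag so a release only drops a count that its matching get actually took.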
/* Update stats and complete the IO. There is
 * no need for dma unprep because the nvme_transport
 * owns the dma address.
 */

/* The node is shared with FCP IO, make sure the IO pending count does
 * not exceed the programmed depth.
 */
- if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
- !expedite) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6174 Fail IO, ndlp qdepth exceeded: "
- "idx %d DID %x pend %d qdepth %d\n",
- lpfc_queue_info->index, ndlp->nlp_DID,
- atomic_read(&ndlp->cmd_pending),
- ndlp->cmd_qdepth);
- atomic_inc(&lport->xmt_fcp_qdepth);
- ret = -EBUSY;
- goto out_fail;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6174 Fail IO, ndlp qdepth exceeded: "
+ "idx %d DID %x pend %d qdepth %d\n",
+ lpfc_queue_info->index, ndlp->nlp_DID,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->cmd_qdepth);
+ atomic_inc(&lport->xmt_fcp_qdepth);
+ ret = -EBUSY;
+ goto out_fail;
+ }
}
lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
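lpfc_ndlp_check_qdepth() is the new gate for all of this accounting, and its body is not part of this excerpt. A minimal sketch of the idea, assuming the helper merely reports whether the adapter has enough exchanges outstanding that this node's programmed depth could be oversubscribed; the xri_cnt fields are assumed names, not the driver's actual layout:

/* Sketch only, not the driver's helper: enable per-node accounting
 * only when the adapter-wide outstanding exchange count could exceed
 * this node's programmed queue depth.
 */
static inline bool
lpfc_ndlp_check_qdepth(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	if (phba->scsi_xri_cnt + phba->nvme_xri_cnt > ndlp->cmd_qdepth)
		return true;
	return false;
}

When the gate returns false, the submit path skips both the depth test and the atomic increment entirely, which is the point of the change.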
goto out_free_nvme_buf;
}
- atomic_inc(&ndlp->cmd_pending);
-
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_queue_info->index, ndlp->nlp_DID);
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_inc(&lport->xmt_fcp_wqerr);
- atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 Fail IO, Could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
lpfc_ncmd->cur_iocbq.sli4_xritag);

lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
+ }
return lpfc_ncmd;
}
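Taken together, lpfc_get_nvme_buf() above and the release path below form a strict pairing: increment plus flag on allocation, decrement plus flag clear on release. A self-contained userspace sketch of that invariant, borrowing nothing from lpfc but the shape of the idea (every name below is hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define BUMP_QDEPTH 0x1			/* hypothetical per-command flag bit */

struct node { atomic_int cmd_pending; int cmd_qdepth; };
struct cmd { unsigned int flags; struct node *ndlp; };

static bool tracking_enabled(struct node *n)
{
	/* stand-in for the driver's lpfc_ndlp_check_qdepth() gate */
	return atomic_load(&n->cmd_pending) >= n->cmd_qdepth / 2;
}

static void get_buf(struct node *n, struct cmd *c)
{
	c->ndlp = n;
	if (tracking_enabled(n)) {
		atomic_fetch_add(&n->cmd_pending, 1);
		c->flags |= BUMP_QDEPTH;	/* remember that we counted */
	}
}

static void release_buf(struct cmd *c)
{
	/* Drop only what get_buf() took, even if the gate's answer
	 * changed while the command was in flight.
	 */
	if ((c->flags & BUMP_QDEPTH) && c->ndlp)
		atomic_fetch_sub(&c->ndlp->cmd_pending, 1);
	c->flags &= ~BUMP_QDEPTH;
	c->ndlp = NULL;
}

The flag is what keeps the counter balanced: a command submitted while tracking was off but completed after it turned on must not decrement a count it never took.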
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
{
unsigned long iflag = 0;
+ if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
+ atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
+
lpfc_ncmd->nonsg_phys = 0;
+ lpfc_ncmd->ndlp = NULL;
+ lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
+
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
"ox_id x%x on reqtag x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag);

spin_unlock(&phba->scsi_buf_list_put_lock);
}
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
return lpfc_cmd;
}
/**
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
if (!found)
return NULL;
+
+ if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ atomic_inc(&ndlp->cmd_pending);
+ lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
+ }
return lpfc_cmd;
}
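Both SCSI allocation variants (the list-splice path above and this lookup path) bump the counter identically. The flag definitions themselves are header changes that this excerpt omits; a plausible shape, with the bit values being assumptions:

/* Assumed bit values; the real definitions live in the driver
 * headers, which are not shown here.
 */
#define LPFC_BUMP_QDEPTH	0x2	/* nvme buf counted against ndlp */
#define LPFC_SBUF_BUMP_QDEPTH	0x8	/* scsi buf counted against ndlp */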
/**
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
+ if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
+ atomic_dec(&psb->ndlp->cmd_pending);
+ psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
phba->lpfc_release_scsi_buf(phba, psb);
}
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
spin_lock_irqsave(shost->host_lock, flags);
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- atomic_dec(&pnode->cmd_pending);
if (pnode->cmd_qdepth >
atomic_read(&pnode->cmd_pending) &&
(atomic_read(&pnode->cmd_pending) >
LPFC_MIN_TGT_QDEPTH) &&
((cmd->cmnd[0] == READ_10) ||
(cmd->cmnd[0] == WRITE_10)))
pnode->cmd_qdepth =
atomic_read(&pnode->cmd_pending);

pnode->last_change_time = jiffies;
}
spin_unlock_irqrestore(shost->host_lock, flags);
- } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
- atomic_dec(&pnode->cmd_pending);
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
goto out_tgt_busy;
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
- "3377 Target Queue Full, scsi Id:%d Qdepth:%d"
- " Pending command:%d"
- " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
- " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- ndlp->nlp_sid, ndlp->cmd_qdepth,
- atomic_read(&ndlp->cmd_pending),
- ndlp->nlp_nodename.u.wwn[0],
- ndlp->nlp_nodename.u.wwn[1],
- ndlp->nlp_nodename.u.wwn[2],
- ndlp->nlp_nodename.u.wwn[3],
- ndlp->nlp_nodename.u.wwn[4],
- ndlp->nlp_nodename.u.wwn[5],
- ndlp->nlp_nodename.u.wwn[6],
- ndlp->nlp_nodename.u.wwn[7],
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
- goto out_tgt_busy;
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
+ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
+ "3377 Target Queue Full, scsi Id:%d "
+ "Qdepth:%d Pending command:%d"
+ " WWNN:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x, "
+ " WWPN:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x",
+ ndlp->nlp_sid, ndlp->cmd_qdepth,
+ atomic_read(&ndlp->cmd_pending),
+ ndlp->nlp_nodename.u.wwn[0],
+ ndlp->nlp_nodename.u.wwn[1],
+ ndlp->nlp_nodename.u.wwn[2],
+ ndlp->nlp_nodename.u.wwn[3],
+ ndlp->nlp_nodename.u.wwn[4],
+ ndlp->nlp_nodename.u.wwn[5],
+ ndlp->nlp_nodename.u.wwn[6],
+ ndlp->nlp_nodename.u.wwn[7],
+ ndlp->nlp_portname.u.wwn[0],
+ ndlp->nlp_portname.u.wwn[1],
+ ndlp->nlp_portname.u.wwn[2],
+ ndlp->nlp_portname.u.wwn[3],
+ ndlp->nlp_portname.u.wwn[4],
+ ndlp->nlp_portname.u.wwn[5],
+ ndlp->nlp_portname.u.wwn[6],
+ ndlp->nlp_portname.u.wwn[7]);
+ goto out_tgt_busy;
+ }
}
- atomic_inc(&ndlp->cmd_pending);
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
*/
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
+ lpfc_cmd->ndlp = ndlp;
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
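Caching the node pointer on the command at submit time is what lets the release side find the right cmd_pending to drop without re-deriving it from rport data that may no longer be valid. The matching structure change is another header hunk this excerpt omits; presumably along these lines:

/* Assumed header change, not shown in this excerpt. */
struct lpfc_scsi_buf {
	/* ... existing fields ... */
	struct lpfc_nodelist *ndlp;	/* set at submit, cleared on release */
};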
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
- atomic_dec(&ndlp->cmd_pending);
return SCSI_MLQUEUE_HOST_BUSY;
out_tgt_busy:
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->ndlp = pnode;
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
task_mgmt_cmd);