/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* NVME initiator-based functions */

static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

static union lpfc_wqe128 lpfc_iread_cmd_template;
static union lpfc_wqe128 lpfc_iwrite_cmd_template;
static union lpfc_wqe128 lpfc_icmnd_cmd_template;
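/*
 * The templates above are built once by lpfc_nvme_cmd_template() below and
 * then copied into each command WQE at IO prep time (lpfc_nvme_prep_io_cmd()
 * memcpy()s the constant words); only the words called out as "variable" in
 * the per-word comments (BDE, tags, transfer lengths) are rewritten per IO.
 */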
/* Setup WQE templates for NVME IOs */
static void
lpfc_nvme_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
/**
 * lpfc_nvme_create_queue -
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	if (!pnvme_lport->private)
		return -ENOMEM;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			vport->phba->cfg_nvme_io_channel);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
			 "io_channel %d qhandle %p\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}
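/*
 * Example of the qidx mapping above: with cfg_nvme_io_channel == 4, the
 * admin queue (qidx 0) and IO qidx 1 both land on index 0, while IO
 * qidx 2, 3, 4, 5 map to indexes 1, 2, 3, 0 via the (qidx - 1) modulo.
 */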
/**
 * lpfc_nvme_delete_queue -
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free any internal data structures
 * that bound the @qidx to its internal IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	if (!pnvme_lport->private)
		return;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER.  lpfc_pnvme %p, qidx x%x qhandle %p\n",
			lport, qidx, handle);
	kfree(handle);
}
static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
			 "6173 localport %p delete complete\n",
			 lport);

	/* release any threads waiting for the unreg to complete */
	complete(&lport->lport_unreg_done);
}
/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;
	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6146 remoteport delete of remoteport %p\n",
			remoteport);
	spin_lock_irq(&vport->phba->hbalock);

	/* The register rebind might have occurred before the delete
	 * downcall.  Guard against this race.
	 */
	if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
		ndlp->nrport = NULL;
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
	}
	spin_unlock_irq(&vport->phba->hbalock);

	/* Remove original register reference. The host transport
	 * won't reference this rport/remoteport any further.
	 */
	lpfc_nlp_put(ndlp);

 rport_err:
	return;
}
static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	struct lpfc_nvme_lport *lport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			atomic_inc(&lport->fc4NvmeLsCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_ls_xb);
				atomic_inc(&lport->cmpl_ls_err);
			}
		}
	}

	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
			 "lsreq:%p bmp:%p ndlp:%p\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done call back? "
				 "Data %p DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}
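/*
 * Note the resource pairing with lpfc_nvme_gen_req() below: the genwqe
 * holds an ndlp reference (lpfc_nlp_get) in context1 and the BPL dmabuf
 * wrapper in context3; the completion above drops the node reference,
 * frees the dmabuf wrapper, and returns the iocbq to the pool.
 */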
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}
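/*
 * lpfc_nvme_gen_req() returns 0 (WQE_SUCCESS) on success or 1 on an
 * allocation or issue failure, which is why the caller below compares
 * against WQE_SUCCESS rather than testing for a negative errno.
 */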
/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp;
	uint16_t ntype, nstate;

	/* there are two dma buf in the request, actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them. And we do not have to look at the response data, we only
	 * care that we got a response. All of the caring is going to happen
	 * in the nvme-fc layer.
	 */

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	if (unlikely(!lport) || unlikely(!rport))
		return -EINVAL;

	vport = lport->vport;

	if (vport->load_flag & FC_UNLOADING)
		return -ENODEV;

	/* Need the ndlp.  It is stored in the driver's rport. */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6051 Remoteport %p, rport has invalid ndlp. "
				 "Failing LS Req\n", pnvme_rport);
		return -ENODEV;
	}

	/* The remote node has to be a mapped nvme target or an
	 * unmapped nvme initiator or it's an error.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6088 DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return -ENODEV;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6044 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return 2;
	}
	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6042 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		kfree(bmp);
		return 3;
	}
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
			 ndlp->nlp_DID,
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	atomic_inc(&lport->fc4NvmeLsRequests);

	/* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
	 * This code allows it all to work.
	 */
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
				ndlp, 2, 30, 0);
	if (ret != WQE_SUCCESS) {
		atomic_inc(&lport->xmt_ls_err);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6052 EXIT. issue ls wqe failed lport %p, "
				 "rport %p lsreq%p Status %x DID %x\n",
				 pnvme_lport, pnvme_rport, pnvme_lsreq,
				 ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return ret;
	}

	/* Stub in routine and return 0 for now. */
	return ret;
}
/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 *
 * Driver registers this routine to abort a link service request
 * previously issued to a remote nvme-aware port.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;
	vport = lport->vport;
	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING)
		return;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		atomic_inc(&lport->xmt_ls_abort);
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}
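/*
 * Design note on the two-pass abort above: matching IOs are collected
 * under hbalock/ring_lock first and aborted one at a time with the lock
 * re-taken afterwards, so lpfc_sli_issue_abort_iotag() is never called
 * while the txcmplq ring lock is held.
 */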
/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_nvme_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */
	wqe = &lpfc_ncmd->cur_iocbq.wqe;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes. Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->nvme_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
	if (phba->cfg_nvme_embed_cmd) {
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;

		/* Word 0-2 - NVME CMND IU (embedded payload) */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = 56;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 64;  /* Word 16 */

		/* Word 10 - dbde is 0, wqes is 1 in template */

		/*
		 * Embed the payload in the last half of the WQE
		 * WQE words 16-30 get the NVME CMD IU payload
		 *
		 * WQE words 16-19 get payload Words 1-4
		 * WQE words 20-21 get payload Words 6-7
		 * WQE words 22-29 get payload Words 16-23
		 */
		wptr = &wqe->words[16];		/* WQE ptr */
		dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
		dptr++;				/* Skip Word 0 in payload */

		*wptr++ = *dptr++;		/* Word 1 */
		*wptr++ = *dptr++;		/* Word 2 */
		*wptr++ = *dptr++;		/* Word 3 */
		*wptr++ = *dptr++;		/* Word 4 */
		dptr++;				/* Skip Word 5 in payload */
		*wptr++ = *dptr++;		/* Word 6 */
		*wptr++ = *dptr++;		/* Word 7 */
		dptr += 8;			/* Skip Words 8-15 in payload */
		*wptr++ = *dptr++;		/* Word 16 */
		*wptr++ = *dptr++;		/* Word 17 */
		*wptr++ = *dptr++;		/* Word 18 */
		*wptr++ = *dptr++;		/* Word 19 */
		*wptr++ = *dptr++;		/* Word 20 */
		*wptr++ = *dptr++;		/* Word 21 */
		*wptr++ = *dptr++;		/* Word 22 */
		*wptr   = *dptr;		/* Word 23 */
	} else {
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

		/* Word 0-2 - NVME CMND IU Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
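/*
 * Note: the RSP SGE above is marked "last" only when the command carries
 * no data (sg_cnt == 0); when data is present, lpfc_nvme_prep_io_dma()
 * appends the data SGEs after these two and sets the last flag on the
 * final data SGE instead.
 */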
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_nvme_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;

	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;

	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
		return;
	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
		return;
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;
	/*
	 * Segment 1 - Time from Last FCP command cmpl is handed
	 * off to NVME Layer to start of next command.
	 * Segment 2 - Time from Driver receives an IO cmd start
	 * from NVME Layer to WQ put is done on IO cmd.
	 * Segment 3 - Time from Driver WQ put is done on IO cmd
	 * to MSI-X ISR for IO cmpl.
	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
	 * cmpl is handed off to the NVME Layer.
	 */
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
		seg1 = 0;

	/* Calculate times relative to start of IO */
	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);

	seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
	seg3 -= seg2;

	seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
	seg4 -= (seg2 + seg3);

	phba->ktime_data_samples++;
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;
	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;
	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;
	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif
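/*
 * Timeline of the latency segments sampled above:
 *   ts_last_cmd -seg1-> ts_cmd_start -seg2-> ts_cmd_wqput
 *     -seg3-> ts_isr_cmpl -seg4-> ts_data_nvme
 * seg3 and seg4 are first taken relative to ts_cmd_start and then reduced
 * by the earlier segments, so the per-IO segments do not overlap.
 */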
/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to HBA context object.
 * @pwqeIn: Pointer to the command WQE that completed.
 * @wcqe: Pointer to the work completion queue entry.
 *
 * Completion handler for NVME FCP command WQEs.  Translates the WCQE
 * completion status into the nvme_fc request status, rebuilds the ERSP
 * IU when the port coalesced it into the CQE, and releases the IO
 * buffer back to the driver's pool.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvme_buf *lpfc_ncmd =
		(struct lpfc_nvme_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	uint32_t code, status, idx;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd) {
		if (!lpfc_ncmd) {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_NODE | LOG_NVME_IOERR,
					 "6071 Null lpfc_ncmd pointer. No "
					 "release, skip completion\n");
			return;
		}

		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
				 "nvmeCmd %p\n",
				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

		/* Release the lpfc_ncmd regardless of the missing elements. */
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
		return;
	}
	nCmd = lpfc_ncmd->nvmeCmd;
	status = bf_get(lpfc_wcqe_c_status, wcqe);

	if (vport->localport) {
		lport = (struct lpfc_nvme_lport *)vport->localport->private;
		if (lport) {
			idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
			cstat = &lport->cstat[idx];
			atomic_inc(&cstat->fc4NvmeIoCmpls);
			if (status) {
				if (bf_get(lpfc_wcqe_c_xb, wcqe))
					atomic_inc(&lport->cmpl_fcp_xb);
				atomic_inc(&lport->cmpl_fcp_err);
			}
		}
	}

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 status, wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = lpfc_ncmd->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6062 Ignoring NVME cmpl.  No ndlp\n");
		goto out_err;
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
		ep->rsvd12 = 0;
		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;
		ep->cqe.status = 0;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
					 "placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
			if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_NVME_IOERR,
						 "6032 Delay Aborted cmd %p "
						 "nvme cmd %p, xri x%x, "
						 "xb %d\n",
						 lpfc_ncmd, nCmd,
						 lpfc_ncmd->cur_iocbq.sli4_xritag,
						 bf_get(lpfc_wcqe_c_xb, wcqe));
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: xri %x "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_INTERNAL;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		if (lpfc_ncmd->cpu != smp_processor_id())
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 smp_processor_id(), lpfc_ncmd->cpu);
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
	}
#endif

	/* NVME targets need completion held off until the abort exchange
	 * completes unless the NVME Rport is getting unregistered.
	 */

	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
		freqpriv = nCmd->private;
		freqpriv->nvme_buf = NULL;
		nCmd->done(nCmd);
		lpfc_ncmd->nvmeCmd = NULL;
	}

	/* Call release with XB=1 to queue the IO into the abort list. */
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: Pointer to the driver's virtual port data
 * @lpfc_ncmd: Pointer to the driver's per-IO nvme buffer
 * @pnode: Pointer to the nodelist entry of the rport receiving the IO
 * @cstat: Pointer to the per-queue controller statistics
 *
 * Copies the constant words from the matching WQE template (iread,
 * iwrite, or icmnd) into the command WQE and then fills in the per-IO
 * variable words.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - The node pointer is not valid
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_nvme_ctrl_stat *cstat)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	uint32_t req_len;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			} else {
				wqe->fcp_iwrite.initial_xfer_len = 0;
			}
			atomic_inc(&cstat->fc4NvmeOutputRequests);
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 4 */
			wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

			/* Word 5 */
			wqe->fcp_iread.rsrvd5 = 0;

			atomic_inc(&cstat->fc4NvmeInputRequests);
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);
		atomic_inc(&cstat->fc4NvmeControlRequests);
	}
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Words 13 14 15 are for PBDE support */

	pwqeq->vport = vport;
	return 0;
}
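/*
 * First-burst sizing above in short: a write may ship up to
 * min(payload_length, nvme_fb_size) immediately when the rport granted
 * first burst (NLP_FIRSTBURST) and cfg_nvme_enable_fb is set; otherwise
 * initial_xfer_len stays 0 and the data waits for the target's transfer
 * ready.
 */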
/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: Pointer to the driver's virtual port data
 * @lpfc_ncmd: Pointer to the driver's per-IO nvme buffer
 *
 * Fixes up the command and response SGEs and then formats one data SGE
 * per scatter-gather element supplied by the nvme_fc transport.
 *
 * Return value :
 *   0 - Success
 *   1 - The sg list is invalid or exceeds the supported segment count
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport.  Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_nvme_seg_cnt + 1,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
			num_bde++;
		}
		if (phba->cfg_enable_pbde) {
			/* Use PBDE support for first SGL only, offset == 0 */
			/* Words 13-15 */
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
				le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
			/* wqe_pbde is 1 in template */
		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
			bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
		}
	} else {
		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}
	return 0;
}
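/*
 * The PBDE path above appears to be a performance hint only: for the
 * first data segment the port can use the BDE placed in WQE words 13-15
 * directly rather than fetching the SGL, while the SGL built here
 * remains valid either way.
 */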
/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value :
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	int expedite = 0;
	int idx;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport)) {
		ret = -EINVAL;
		goto out_fail;
	}

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6117 Fail IO, NULL hw_queue_handle\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	phba = vport->phba;

	if (vport->load_flag & FC_UNLOADING) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6124 Fail IO, Driver unload\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENODEV;
		goto out_fail;
	}

	freqpriv = pnvme_fcreq->private;
	if (unlikely(!freqpriv)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6158 Fail IO, NULL request data\n");
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EINVAL;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6053 Fail IO, ndlp not ready: rport %p "
				 "ndlp %p, DID x%06x\n",
				 rport, ndlp, pnvme_rport->port_id);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -EBUSY;
		goto out_fail;
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
				 "6036 Fail IO, DID x%06x not ready for "
				 "IO. State x%x, Type x%x Flg x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type,
				 ndlp->upcall_flags);
		atomic_inc(&lport->xmt_fcp_bad_ndlp);
		ret = -EBUSY;
		goto out_fail;
	}

	/* Currently only NVME Keep alive commands should be expedited
	 * if the driver runs out of a resource. These should only be
	 * issued on the admin queue, qidx 0
	 */
	if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
		sqe = &((struct nvme_fc_cmd_iu *)
			pnvme_fcreq->cmdaddr)->sqe.common;
		if (sqe->opcode == nvme_admin_keep_alive)
			expedite = 1;
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
		    !expedite) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
					 "6174 Fail IO, ndlp qdepth exceeded: "
					 "idx %d DID %x pend %d qdepth %d\n",
					 lpfc_queue_info->index, ndlp->nlp_DID,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->cmd_qdepth);
			atomic_inc(&lport->xmt_fcp_qdepth);
			ret = -EBUSY;
			goto out_fail;
		}
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
	if (lpfc_ncmd == NULL) {
		atomic_inc(&lport->xmt_fcp_noxri);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 Fail IO, driver buffer pool is empty: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		ret = -EBUSY;
		goto out_fail;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	} else {
		lpfc_ncmd->ts_cmd_start = 0;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever.  There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	freqpriv->nvme_buf = lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->start_time = jiffies;

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine. The driver now is dependent on the IO queue steering from
	 * the transport.  We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	idx = lpfc_queue_info->index;
	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
	cstat = &lport->cstat[idx];

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6175 Fail IO, Prep DMA: "
				 "idx %d DID %x\n",
				 lpfc_queue_info->index, ndlp->nlp_DID);
		atomic_inc(&lport->xmt_fcp_err);
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_inc(&lport->xmt_fcp_wqerr);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6113 Fail IO, Could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		goto out_free_nvme_buf;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_ncmd->ts_cmd_start)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		lpfc_ncmd->cpu = smp_processor_id();
		if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
			/* Check for admin queue */
			if (lpfc_queue_info->qidx) {
				lpfc_printf_vlog(vport,
						 KERN_ERR, LOG_NVME_IOERR,
						 "6702 CPU Check cmd: "
						 "cpu %d wq %d\n",
						 lpfc_ncmd->cpu,
						 lpfc_queue_info->index);
			}
			lpfc_ncmd->cpu = lpfc_queue_info->index;
		}
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
	}
#endif
	return 0;

 out_free_nvme_buf:
	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
			atomic_dec(&cstat->fc4NvmeOutputRequests);
		else
			atomic_dec(&cstat->fc4NvmeInputRequests);
	} else
		atomic_dec(&cstat->fc4NvmeControlRequests);
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}
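/*
 * Note: the out_free_nvme_buf path above decrements the same per-queue
 * cstat counter that lpfc_nvme_prep_io_cmd() incremented, so the
 * input/output/control request counts stay balanced on failure.
 */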
/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @abts_cmpl: Pointer to the abort WCQE completion.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its nvme request io abort handler.  This
 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.  This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nvme_buf *lpfc_nbuf;
	struct lpfc_iocbq *abts_buf;
	struct lpfc_iocbq *nvmereq_wqe;
	struct lpfc_nvme_fcpreq_priv *freqpriv;
	union lpfc_wqe128 *abts_wqe;
	unsigned long flags;
	int ret_val;

	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
	 */
	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	if (unlikely(!lport))
		return;

	vport = lport->vport;

	if (unlikely(!hw_queue_handle)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
				 "6129 Fail Abort, HW Queue Handle NULL.\n");
		return;
	}

	phba = vport->phba;
	freqpriv = pnvme_fcreq->private;

	if (unlikely(!freqpriv))
		return;
	if (vport->load_flag & FC_UNLOADING)
		return;

	/* Announce entry to new IO submit field. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req %p\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now.  hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = freqpriv->nvme_buf;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer.  Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer.  Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport.  If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf %p nvmeCmd %p, "
				 "pnvme_fcreq %p.  Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6142 NVME IO req %p not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	atomic_inc(&lport->xmt_fcp_abort);
	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq %p, "
				 "lpfc_ncmd %p xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		return;
	}

	abts_buf = __lpfc_sli_get_iocbq(phba);
	if (!abts_buf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6136 No available abort wqes. Skipping "
				 "Abts req for nvme_fcreq %p xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Ready - mark outstanding as aborted by driver. */
	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* Complete prepping the abort wqe and issue to the FW. */
	abts_wqe = &abts_buf->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       nvmereq_wqe->iocb.ulpClass);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_buf->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_buf->iocb_flag |= LPFC_IO_NVME;
	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
	abts_buf->vport = vport;
	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (ret_val) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq %p.\n",
				 ret_val, pnvme_fcreq);
		lpfc_sli_release_iocbq(phba, abts_buf);
		return;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 nvmereq_wqe->sli4_xritag,
			 abts_buf->iotag);
}
1888 static struct nvme_fc_port_template lpfc_nvme_template = {
1889 /* initiator-based functions */
1890 .localport_delete = lpfc_nvme_localport_delete,
1891 .remoteport_delete = lpfc_nvme_remoteport_delete,
1892 .create_queue = lpfc_nvme_create_queue,
1893 .delete_queue = lpfc_nvme_delete_queue,
1894 .ls_req = lpfc_nvme_ls_req,
1895 .fcp_io = lpfc_nvme_fcp_io_submit,
1896 .ls_abort = lpfc_nvme_ls_abort,
1897 .fcp_abort = lpfc_nvme_fcp_abort,
1900 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1901 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1902 .dma_boundary = 0xFFFFFFFF,
1904 /* Sizes of additional private data for data structures.
1905 * No use for the last two sizes at this time.
1907 .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1908 .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1909 .lsrqst_priv_sz = 0,
1910 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
 * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of nvme buffers on the list.
 *
 * This routine is invoked to post a block of @count nvme sgl pages from an
 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
 * No lock is held.
 **/
static int
lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
			      struct list_head *nblist,
			      int count)
{
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
					 SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
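/*
 * Aside (illustrative only): the SLI4_PAGE_SIZE check above bounds how
 * many sgl page-pairs fit in one non-embedded mailbox.  With a 4KB
 * page, 16 bytes per sgl_page_pairs entry, and roughly 16 bytes of
 * config header, about (4096 - 16) / 16 ~= 255 buffers could be posted
 * per mailbox; callers batch at LPFC_NEMBED_MBOX_SGL_CNT to stay under
 * the limit.  The exact sizes live in lpfc_hw4.h and may differ; this
 * is just the shape of the arithmetic.
 */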
 * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers on the list.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post them to the
 * port. For a single NVME buffer sgl with a non-contiguous xri, if any, it
 * shall use the embedded SGL post mailbox command for posting. The
 * @post_nblist passed in must be a local list, so no lock is needed when
 * manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_nblist, int sb_count)
{
	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	uint16_t cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return 0;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 post_nblist, list) {
		/* Remove from the nvme buffer list */
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(phba,
						lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* failure, put on abort nvme list */
					lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
				} else {
					/* success, put on NVME buffer list */
					lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success or failure, put on the local
				 * NVME buffer sgl list
				 */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffers on the NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_nvme_buf, list);
			if (status) {
				/* failure, put on abort nvme list */
				lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
			} else {
				/* success, put on NVME buffer list */
				lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	while (!list_empty(&nvme_nblist)) {
		list_remove_head(&nvme_nblist, lpfc_ncmd,
				 struct lpfc_nvme_buf, list);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
	}
	return num_posted;
}
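/*
 * Aside (illustrative only): the batching above is run-length grouping
 * of contiguous XRIs.  A stripped-down model of the loop, outside any
 * driver types:
 *
 *	int last = -1, run = 0;
 *	for (i = 0; i < n; i++) {
 *		if (last >= 0 && xri[i] != last + 1) {
 *			post_block(run);	// hole: flush current run
 *			run = 0;
 *		}
 *		run++;
 *		last = xri[i];
 *	}
 *	post_block(run);			// flush the tail
 *
 * post_block() here stands in for lpfc_sli4_post_nvme_sgl_block(); the
 * real loop also caps a run at LPFC_NEMBED_MBOX_SGL_CNT entries and
 * falls back to the embedded lpfc_sli4_post_sgl() for a lone trailing
 * non-contiguous XRI.
 */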
 * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of nvme buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
 * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_nblist);
	int num_posted, rc = 0;

	/* get all NVME buffers needing repost onto a local list */
	spin_lock_irq(&phba->nvme_buf_list_get_lock);
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
	list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
	phba->get_nvme_bufs = 0;
	phba->put_nvme_bufs = 0;
	spin_unlock(&phba->nvme_buf_list_put_lock);
	spin_unlock_irq(&phba->nvme_buf_list_get_lock);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist)) {
		num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
						 phba->sli4_hba.nvme_xri_cnt);
		/* failed to post any nvme buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}
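/*
 * Aside (illustrative only): a hypothetical reset path would call this
 * right after SLI comes back up and treat a non-zero return as "NVME
 * IO stays offline", e.g.
 *
 *	rc = lpfc_repost_nvme_sgl_list(phba);
 *	if (rc)
 *		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
 *				"nvme sgl repost failed, rc x%x\n", rc);
 *
 * The real call site is the driver's SLI4 bring-up (HBA setup) path.
 */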
 * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates nvme buffers for a device with the SLI-4 interface
 * spec. An nvme buffer contains all the necessary information needed to
 * initiate an NVME I/O. After allocating up to @num_to_alloc NVME buffers
 * and putting them on a list, it posts them to the port by using SGL block
 * post.
 *
 * Return codes:
 *   int - number of nvme buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = &pwqeq->wqe;

		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6121 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		pwqeq->iocb_flag |= LPFC_IO_NVME;
		pwqeq->context1 = lpfc_ncmd;
		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;

		/* Initialize local short-hand pointers. */
		lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
		sgl = lpfc_ncmd->nvme_sgl;
		pdma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;

		/* Rsp SGE will be filled in when we rcv an IO
		 * from the NVME Layer to be sent.
		 * The cmd is going to be embedded so we need a SKIP SGE.
		 */
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		/* Fill in word 3 / sgl_len during cmd submission */

		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;

		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));

		/* add the nvme buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		spin_lock_irq(&phba->nvme_buf_list_get_lock);
		phba->sli4_hba.nvme_xri_cnt++;
		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocated %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_post_nvme_sgl_list(phba,
						     &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
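/*
 * Aside (illustrative only): note the unwind ordering in the loop
 * above - each failure point releases exactly what was acquired before
 * it, and kfree of the containing buffer always runs last.  A generic
 * shape of the idiom:
 *
 *	a = alloc_a();	if (!a) break;
 *	b = alloc_b();	if (!b) { free_a(a); break; }
 *	c = alloc_c();	if (!c) { free_b(b); free_a(a); break; }
 *
 * Partial success is still success here: bcnt buffers reach the post
 * list even if a later iteration fails.
 */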
static inline struct lpfc_nvme_buf *
lpfc_nvme_buf(struct lpfc_hba *phba)
{
	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;

	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &phba->lpfc_nvme_buf_list_get, list) {
		list_del_init(&lpfc_ncmd->list);
		phba->get_nvme_bufs--;
		return lpfc_ncmd;
	}
	return NULL;
}

/**
 * lpfc_get_nvme_buf - Get an nvme buffer from lpfc_nvme_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes an nvme buffer from head of @phba lpfc_nvme_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_nvme_buf - Success
 **/
static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int expedite)
{
	struct lpfc_nvme_buf *lpfc_ncmd = NULL;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
	if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
		lpfc_ncmd = lpfc_nvme_buf(phba);
	if (!lpfc_ncmd) {
		spin_lock(&phba->nvme_buf_list_put_lock);
		list_splice(&phba->lpfc_nvme_buf_list_put,
			    &phba->lpfc_nvme_buf_list_get);
		phba->get_nvme_bufs += phba->put_nvme_bufs;
		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
		phba->put_nvme_bufs = 0;
		spin_unlock(&phba->nvme_buf_list_put_lock);
		if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
		    expedite)
			lpfc_ncmd = lpfc_nvme_buf(phba);
	}
	spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
	}
	return lpfc_ncmd;
}
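/*
 * Aside (illustrative only): the get/put pair of lists above is a
 * classic two-lock free-list split.  Allocators drain only the "get"
 * list, frees land on the "put" list, and when "get" runs dry the
 * whole "put" list is spliced over under both locks.  In the common
 * case an alloc and a free never contend on the same spinlock.  A
 * minimal model:
 *
 *	get:    lock(get);  obj = pop(get_list);  unlock(get);
 *	put:    lock(put);  push(put_list, obj);  unlock(put);
 *	refill: lock(get);  lock(put);
 *	        splice(put_list -> get_list);
 *	        unlock(put);  unlock(get);
 */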
 * lpfc_release_nvme_buf: Return an nvme buffer back to hba nvme buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
 * lpfc_nvme_buf_list list. For SLI4, XRI's are tied to the nvme buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
{
	unsigned long iflag = 0;

	if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
		atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

	lpfc_ncmd->nonsg_phys = 0;
	lpfc_ncmd->ndlp = NULL;
	lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;

	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6310 XB release deferred for "
				"ox_id x%x on reqtag x%x\n",
				lpfc_ncmd->cur_iocbq.sli4_xritag,
				lpfc_ncmd->cur_iocbq.iotag);

		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
				  iflag);
		list_add_tail(&lpfc_ncmd->list,
			      &phba->sli4_hba.lpfc_abts_nvme_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
	} else {
		lpfc_ncmd->nvmeCmd = NULL;
		lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
		list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
		phba->put_nvme_bufs++;
		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
	}
}
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport. It is called once during driver load
 * like lpfc_create_shost after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time. Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * are established.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - no heap memory available
 *      other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	int len, i;

	/* Initialize this localport instance. The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3: one for cmd, one for rsp, and one for this alignment.
	 */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;

	cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
			 phba->cfg_nvme_io_channel), GFP_KERNEL);
	if (!cstat)
		return -ENOMEM;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */
#if (IS_ENABLED(CONFIG_NVME_FC))
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev,
					 &localport);
#else
	ret = -ENOMEM;
#endif
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP %p, private %p, "
				 "sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		lport->cstat = cstat;
		vport->nvmei_support = 1;

		atomic_set(&lport->xmt_fcp_noxri, 0);
		atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
		atomic_set(&lport->xmt_fcp_qdepth, 0);
		atomic_set(&lport->xmt_fcp_err, 0);
		atomic_set(&lport->xmt_fcp_wqerr, 0);
		atomic_set(&lport->xmt_fcp_abort, 0);
		atomic_set(&lport->xmt_ls_abort, 0);
		atomic_set(&lport->xmt_ls_err, 0);
		atomic_set(&lport->cmpl_fcp_xb, 0);
		atomic_set(&lport->cmpl_fcp_err, 0);
		atomic_set(&lport->cmpl_ls_xb, 0);
		atomic_set(&lport->cmpl_ls_err, 0);
		atomic_set(&lport->fc4NvmeLsRequests, 0);
		atomic_set(&lport->fc4NvmeLsCmpls, 0);

		for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
			cstat = &lport->cstat[i];
			atomic_set(&cstat->fc4NvmeInputRequests, 0);
			atomic_set(&cstat->fc4NvmeOutputRequests, 0);
			atomic_set(&cstat->fc4NvmeControlRequests, 0);
			atomic_set(&cstat->fc4NvmeIoCmpls, 0);
		}

		/* Don't post more new bufs if repost already recovered
		 * the nvme sgls.
		 */
		if (phba->sli4_hba.nvme_xri_cnt == 0) {
			len = lpfc_new_nvme_buf(vport,
						phba->sli4_hba.nvme_xri_max);
			vport->phba->total_nvme_bufs += len;
		}
	} else {
		kfree(cstat);
	}

	return ret;
}
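/*
 * Aside (illustrative only): the cast of localport->private above works
 * because local_priv_sz in lpfc_nvme_template told the transport to
 * reserve sizeof(struct lpfc_nvme_lport) bytes right behind the
 * nvme_fc_local_port it allocates.  Conceptually:
 *
 *	+---------------------------+
 *	| struct nvme_fc_local_port |  <- owned by the nvme-fc transport
 *	+---------------------------+
 *	| struct lpfc_nvme_lport    |  <- ->private points here
 *	+---------------------------+
 *
 * The same pattern backs remote_priv_sz (rports) and fcprqst_priv_sz
 * (FCP requests).
 */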
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources. Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	u32 wait_tmo;
	int ret;

	/* Host transport has to clean up and confirm requiring an indefinite
	 * wait. Print a message if a 10 second wait expires and renew the
	 * wait. This is unexpected.
	 */
	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
	while (true) {
		ret = wait_for_completion_timeout(&lport->lport_unreg_done,
						  wait_tmo);
		if (unlikely(!ret)) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6176 Lport %p Localport %p wait "
					 "timed out. Renewing.\n",
					 lport, vport->localport);
		} else {
			break;
		}
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
			 "6177 Lport %p Localport %p Complete Success\n",
			 lport, vport->localport);
#endif
}
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to the lpfc vport instance.
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there. This routine ensures all rports bound to the
 * lport have been disconnected.
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_ctrl_stat *cstat;
	int ret;

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	vport->localport = NULL;
	lport = (struct lpfc_nvme_lport *)localport->private;
	cstat = lport->cstat;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport %p\n",
			 localport);

	/* lport's rport list is clear. Unregister
	 * lport and release resources.
	 */
	init_completion(&lport->lport_unreg_done);
	ret = nvme_fc_unregister_localport(localport);

	/* Wait for completion. This either blocks
	 * indefinitely or succeeds.
	 */
	lpfc_nvme_lport_unreg_wait(vport, lport);
	kfree(cstat);

	/* Regardless of the unregister upcall response, clear
	 * nvmei_support. All rports are unregistered and the
	 * driver will clean up.
	 */
	vport->nvmei_support = 0;
	if (ret == 0) {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	} else {
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
	}
#endif
}
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP %p, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport %p did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport %p to DID x%06x\n",
			 lport, localport->port_id);
#endif
}
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_rport *oldrport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;
	struct lpfc_nodelist *prev_ndlp = NULL;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	if (!localport)
		return 0;

	lport = (struct lpfc_nvme_lport *)localport->private;

	/* NVME rports are not preserved across devloss.
	 * Just register this instance. Note, rpinfo->dev_loss_tmo
	 * is left 0 to indicate accept transport defaults. The
	 * driver communicates port role capabilities consistent
	 * with the PRLI response data.
	 */
	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
	rpinfo.port_id = ndlp->nlp_DID;
	if (ndlp->nlp_type & NLP_NVME_TARGET)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);

	spin_lock_irq(&vport->phba->hbalock);
	oldrport = lpfc_ndlp_get_nrport(ndlp);
	spin_unlock_irq(&vport->phba->hbalock);
	if (!oldrport)
		lpfc_nlp_get(ndlp);

	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
	if (!ret) {
		/* If the ndlp already has an nrport, this is just
		 * a resume of the existing rport. Else this is a
		 * new rport.
		 */
		/* Guard against an unregister/reregister
		 * race that leaves the WAIT flag set.
		 */
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
		spin_unlock_irq(&vport->phba->hbalock);
		rport = remote_port->private;
		if (oldrport) {
			/* New remoteport record does not guarantee valid
			 * host private memory area.
			 */
			prev_ndlp = oldrport->ndlp;
			if (oldrport == remote_port->private) {
				/* Same remoteport - ndlp should match.
				 * Just reuse.
				 */
				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
						 LOG_NVME_DISC,
						 "6014 Rebinding lport to "
						 "remoteport %p wwpn 0x%llx, "
						 "Data: x%x x%x %p %p x%x "
						 "x%06x\n",
						 remote_port,
						 remote_port->port_name,
						 remote_port->port_id,
						 remote_port->port_role,
						 prev_ndlp,
						 ndlp,
						 ndlp->nlp_type,
						 ndlp->nlp_DID);
				return 0;
			}

			/* Sever the ndlp<->rport association
			 * before dropping the ndlp ref from
			 * register.
			 */
			spin_lock_irq(&vport->phba->hbalock);
			ndlp->nrport = NULL;
			ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
			spin_unlock_irq(&vport->phba->hbalock);
			rport->ndlp = NULL;
			rport->remoteport = NULL;

			/* Reference only removed if previous NDLP is no
			 * longer active. It might be just a swap and
			 * removing the reference would cause a premature
			 * cleanup.
			 */
			if (prev_ndlp && prev_ndlp != ndlp) {
				if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
				    (!prev_ndlp->nrport))
					lpfc_nlp_put(prev_ndlp);
			}
		}

		/* Clean bind the rport to the ndlp. */
		rport->remoteport = remote_port;
		rport->lport = lport;
		rport->ndlp = ndlp;
		spin_lock_irq(&vport->phba->hbalock);
		ndlp->nrport = rport;
		spin_unlock_irq(&vport->phba->hbalock);
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_NVME_DISC | LOG_NODE,
				 "6022 Binding new rport to "
				 "lport %p Remoteport %p rport %p WWNN 0x%llx, "
				 "Rport WWPN 0x%llx DID "
				 "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
				 lport, remote_port, rport,
				 rpinfo.node_name, rpinfo.port_name,
				 rpinfo.port_id, rpinfo.port_role,
				 ndlp, prev_ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_ERR,
				 LOG_NVME_DISC | LOG_NODE,
				 "6031 RemotePort Registration failed "
				 "err: %d, DID x%06x\n",
				 ret, ndlp->nlp_DID);
	}

	return ret;
#else
	return 0;
#endif
}
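/*
 * Aside (illustrative only): nvme_fc_register_remoteport() may hand
 * back the *same* remoteport when the WWPN/WWNN/DID match an rport
 * still waiting out devloss - that is the "resume" branch above.  The
 * decision tree reduces to:
 *
 *	ret == 0, oldrport == remote_port->private -> rebind, done
 *	ret == 0, oldrport != remote_port->private -> sever old links,
 *						       bind new rport
 *	ret != 0                                   -> log and fail
 *
 * The lpfc_nlp_get()/lpfc_nlp_put() calls keep the node reference
 * count balanced across those paths.
 */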
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of Devloss or rport recovery from the current
 * nvme_transport perspective. Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued. Changes are planned for later.
 *
 * Notes - the ndlp reference count is not decremented here since
 * there is no nvme_transport api for devloss. Node ref count is
 * only adjusted in driver unload.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport = NULL;

	localport = vport->localport;

	/* This is a fundamental error. The localport is always
	 * available until driver unload. Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	spin_lock_irq(&vport->phba->hbalock);
	rport = lpfc_ndlp_get_nrport(ndlp);
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&vport->phba->hbalock);
	if (!remoteport)
		goto input_err;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport %p, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type);

	/* Sanity check ndlp type. Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */

	if (ndlp->nlp_type & NLP_NVME_TARGET) {
		/* No concern about the role change on the nvme remoteport.
		 * The transport will update it.
		 */
		ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;

		/* Don't let the host nvme transport keep sending keep-alives
		 * on this remoteport. Vport is unloading, no recovery. The
		 * return value is ignored. The upcall is a courtesy to the
		 * transport.
		 */
		if (vport->load_flag & FC_UNLOADING)
			(void)nvme_fc_set_remoteport_devloss(remoteport, 0);

		ret = nvme_fc_unregister_remoteport(remoteport);
		if (ret != 0) {
			lpfc_nlp_put(ndlp);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);
		}
	}
	return;

input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
			 "6168 State error: lport %p, rport %p FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}
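/*
 * Aside (illustrative only): note that the input_err label sits
 * *before* the #endif, so when CONFIG_NVME_FC is disabled the function
 * body reduces to just the "6168 State error" message - the same trace
 * serves both the error path and the stubbed-out build.
 */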
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvme xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
	struct nvmefc_fcp_req *nvme_cmd = NULL;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
				 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
				 list) {
		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&lpfc_ncmd->list);
			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
			lpfc_ncmd->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_nvme_buf_list_lock);

			spin_unlock_irqrestore(&phba->hbalock, iflag);
			ndlp = lpfc_ncmd->ndlp;
			if (ndlp)
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6311 nvme_cmd %p xri x%x tag x%x "
					"abort complete and xri released\n",
					lpfc_ncmd->nvmeCmd, xri,
					lpfc_ncmd->cur_iocbq.iotag);

			/* Aborted NVME commands are required to not complete
			 * before the abort exchange command fully completes.
			 * Once completed, it is available via the put list.
			 */
			if (lpfc_ncmd->nvmeCmd) {
				nvme_cmd = lpfc_ncmd->nvmeCmd;
				nvme_cmd->done(nvme_cmd);
				lpfc_ncmd->nvmeCmd = NULL;
			}
			lpfc_release_nvme_buf(phba, lpfc_ncmd);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6312 XRI Aborted xri x%x not found\n", xri);
}
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in the txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when a device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	u32 i, wait_cnt = 0;

	if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
		return;

	/* Cycle through all NVME rings and make sure all outstanding
	 * WQEs have been removed from the txcmplqs.
	 */
	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
		pring = phba->sli4_hba.nvme_wq[i]->pring;
		if (!pring)
			continue;

		/* Retrieve everything on the txcmplq */
		while (!list_empty(&pring->txcmplq)) {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_cnt++;

			/* The sleep is 10mS. Every ten seconds,
			 * dump a message. Something is wrong.
			 */
			if ((wait_cnt % 1000) == 0) {
				lpfc_printf_log(phba, KERN_ERR,
						LOG_NVME_IOERR,
						"6178 NVME IO not empty, "
						"cnt %d\n", wait_cnt);
			}
		}
	}
}