/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
                                                 struct lpfc_nvmet_rcv_ctx *,
                                                 dma_addr_t rspbuf,
                                                 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
                                                  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
                                          struct lpfc_nvmet_rcv_ctx *,
                                          uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                           struct lpfc_nvmet_rcv_ctx *,
                                           uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* TSEND template */
        wqe = &lpfc_tsend_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is zero */

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 - wqe_ar is variable */
        bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - wqes, xc is variable */
        bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

        /* Word 11 - sup, irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
        bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is zero */

        /* TRECEIVE template */
        wqe = &lpfc_treceive_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 */
        wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
        bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is variable */

        /* TRSP template */
        wqe = &lpfc_trsp_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - response_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
        bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
        bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 wqes, xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

        /* Word 11 irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
        bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}
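
/*
 * Note: the templates above capture only the invariant WQE fields; the
 * per-IO prep routines are expected to copy a template into the command's
 * WQE and then fill in the variable words. A minimal sketch of that
 * pattern (illustrative, not the exact prep-routine code):
 *
 *	memcpy(&wqe->fcp_tsend, &lpfc_tsend_cmd_template.fcp_tsend,
 *	       sizeof(struct fcp_tsend64_wqe));
 *	// then set the variable words: BDE, relative_offset, ctxt_tag,
 *	// xri_tag, abort_tag, reqtag/rcvoxid, fcp_data_len, ...
 */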

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
static struct lpfc_nvmet_rcv_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
        struct lpfc_nvmet_rcv_ctx *ctxp;
        unsigned long iflag;
        bool found = false;

        spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
        list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
                if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
                        continue;

                found = true;
                break;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
        if (found)
                return ctxp;

        return NULL;
}

static struct lpfc_nvmet_rcv_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
        struct lpfc_nvmet_rcv_ctx *ctxp;
        unsigned long iflag;
        bool found = false;

        spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
        list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
                if (ctxp->oxid != oxid || ctxp->sid != sid)
                        continue;

                found = true;
                break;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
        if (found)
                return ctxp;

        return NULL;
}
#endif
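
/*
 * Both lookup helpers above do a linear walk of t_active_ctx_list under
 * t_active_list_lock. They are intended for exception paths (e.g.
 * matching an aborted or stale exchange back to its context), not for
 * per-IO fast-path lookup.
 */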

static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
        lockdep_assert_held(&ctxp->ctxlock);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
                        ctxp->oxid, ctxp->flag);

        if (ctxp->flag & LPFC_NVMET_CTX_RLS)
                return;

        ctxp->flag |= LPFC_NVMET_CTX_RLS;
        spin_lock(&phba->sli4_hba.t_active_list_lock);
        list_del(&ctxp->list);
        spin_unlock(&phba->sli4_hba.t_active_list_lock);
        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}
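
/*
 * lpfc_nvmet_defer_release() must be called with ctxp->ctxlock held
 * (enforced by the lockdep assert above). The typical caller pattern,
 * taken from the abort paths later in this file, looks like:
 *
 *	spin_lock_irqsave(&ctxp->ctxlock, iflag);
 *	lpfc_nvmet_defer_release(phba, ctxp);
 *	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 *	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
 */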

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_ls_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
        ctxp = cmdwqe->context2;

        if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6410 NVMET LS cmpl state mismatch IO x%x: "
                                "%d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (!phba->targetport)
                goto out;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        if (tgtp) {
                if (status) {
                        atomic_inc(&tgtp->xmt_ls_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_ls_rsp_aborted);
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
                } else {
                        atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
                }
        }

out:
        rsp = &ctxp->ctx.ls_req;

        lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
                         ctxp->oxid, status, result);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
                        status, result, ctxp->oxid);

        lpfc_nlp_put(cmdwqe->context1);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        rsp->done(rsp);
        kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: ctx buffer context to clean up and make available for reuse
 *
 * Description: Frees the given DMA buffer in the appropriate way by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes several sli4_hba list locks. Can be called with or without
 * other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct rqb_dmabuf *nvmebuf;
        struct lpfc_nvmet_ctx_info *infop;
        uint32_t size, oxid, sid;
        int cpu;
        unsigned long iflag;

        if (ctxp->state == LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6411 NVMET free, already free IO x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (ctxp->rqb_buffer) {
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                nvmebuf = ctxp->rqb_buffer;
                /* check if freed in another path whilst acquiring lock */
                if (nvmebuf) {
                        ctxp->rqb_buffer = NULL;
                        if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
                                ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
                                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                                nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
                                                                    nvmebuf);
                        } else {
                                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                                /* repost */
                                lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
                        }
                } else {
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                }
        }
        ctxp->state = LPFC_NVMET_STE_FREE;

        spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
        if (phba->sli4_hba.nvmet_io_wait_cnt) {
                list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
                                 nvmebuf, struct rqb_dmabuf,
                                 hbuf.list);
                phba->sli4_hba.nvmet_io_wait_cnt--;
                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
                                       iflag);

                fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
                oxid = be16_to_cpu(fc_hdr->fh_ox_id);
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                size = nvmebuf->bytes_recv;
                sid = sli4_sid_from_fc_hdr(fc_hdr);

                ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
                ctxp->wqeq = NULL;
                ctxp->offset = 0;
                ctxp->phba = phba;
                ctxp->size = size;
                ctxp->oxid = oxid;
                ctxp->sid = sid;
                ctxp->state = LPFC_NVMET_STE_RCV;
                ctxp->entry_cnt = 1;
                ctxp->flag = 0;
                ctxp->ctxbuf = ctx_buf;
                ctxp->rqb_buffer = (void *)nvmebuf;
                spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                /* NOTE: isr time stamp is stale when context is re-assigned */
                if (ctxp->ts_isr_cmd) {
                        ctxp->ts_cmd_nvme = 0;
                        ctxp->ts_nvme_data = 0;
                        ctxp->ts_data_wqput = 0;
                        ctxp->ts_isr_data = 0;
                        ctxp->ts_data_nvme = 0;
                        ctxp->ts_nvme_status = 0;
                        ctxp->ts_status_wqput = 0;
                        ctxp->ts_isr_status = 0;
                        ctxp->ts_status_nvme = 0;
                }
#endif
                atomic_inc(&tgtp->rcv_fcp_cmd_in);

                /* Indicate that a replacement buffer has been posted */
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

                if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
                        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6181 Unable to queue deferred work "
                                        "for oxid x%x. "
                                        "FCP Drop IO [x%x x%x x%x]\n",
                                        ctxp->oxid,
                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
                                        atomic_read(&tgtp->xmt_fcp_release));

                        spin_lock_irqsave(&ctxp->ctxlock, iflag);
                        lpfc_nvmet_defer_release(phba, ctxp);
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                        lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                }
                return;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

        /*
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save context structure.
         */
        spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
        list_del_init(&ctxp->list);
        spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
        cpu = raw_smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
        list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
        infop->nvmet_ctx_list_cnt++;
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
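
/*
 * When a command was waiting on the nvmet_io_wait list, the code above
 * re-initializes this context for the deferred command and queues
 * ctx_buf->defer_work (bound to lpfc_nvmet_fcp_rqst_defer_work at setup
 * time); that work item ultimately hands the deferred FCP request to
 * lpfc_nvmet_process_rcv_fcp_req() for normal receive processing.
 */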

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
                 struct lpfc_nvmet_rcv_ctx *ctxp)
{
        uint64_t seg1, seg2, seg3, seg4, seg5;
        uint64_t seg6, seg7, seg8, seg9, seg10;
        uint64_t segsum;

        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
                return;

        if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
                return;
        if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
                return;
        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
                return;
        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
                return;
        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
                return;
        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
                return;
        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
                return;
        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
                return;
        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
                return;
        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
                return;
        /*
         * Segment 1 - Time from FCP command received by MSI-X ISR
         * to FCP command is passed to NVME Layer.
         * Segment 2 - Time from FCP command payload handed
         * off to NVME Layer to Driver receives a Command op
         * from NVME Layer.
         * Segment 3 - Time from Driver receives a Command op
         * from NVME Layer to Command is put on WQ.
         * Segment 4 - Time from Driver WQ put is done
         * to MSI-X ISR for Command cmpl.
         * Segment 5 - Time from MSI-X ISR for Command cmpl to
         * Command cmpl is passed to NVME Layer.
         * Segment 6 - Time from Command cmpl is passed to NVME
         * Layer to Driver receives a RSP op from NVME Layer.
         * Segment 7 - Time from Driver receives a RSP op from
         * NVME Layer to WQ put is done on TRSP FCP Status.
         * Segment 8 - Time from Driver WQ put is done on TRSP
         * FCP Status to MSI-X ISR for TRSP cmpl.
         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
         * TRSP cmpl is passed to NVME Layer.
         * Segment 10 - Time from FCP command received by
         * MSI-X ISR to command is completed on wire.
         * (Segments 1 thru 8) for READDATA / WRITEDATA
         * (Segments 1 thru 4) for READDATA_RSP
         */
        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
        segsum = seg1;

        seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
        if (segsum > seg2)
                return;
        seg2 -= segsum;
        segsum += seg2;

        seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
        if (segsum > seg3)
                return;
        seg3 -= segsum;
        segsum += seg3;

        seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
        if (segsum > seg4)
                return;
        seg4 -= segsum;
        segsum += seg4;

        seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
        if (segsum > seg5)
                return;
        seg5 -= segsum;
        segsum += seg5;

        /* For auto rsp commands seg6 thru seg10 will be 0 */
        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
                seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
                if (segsum > seg6)
                        return;
                seg6 -= segsum;
                segsum += seg6;

                seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
                if (segsum > seg7)
                        return;
                seg7 -= segsum;
                segsum += seg7;

                seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
                if (segsum > seg8)
                        return;
                seg8 -= segsum;
                segsum += seg8;

                seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
                if (segsum > seg9)
                        return;
                seg9 -= segsum;
                segsum += seg9;

                if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
                        return;
                seg10 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd);
        } else {
                if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
                        return;
                seg6 =  0;
                seg7 =  0;
                seg8 =  0;
                seg9 =  0;
                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
        }

        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;

        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;

        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;

        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        phba->ktime_seg5_total += seg5;
        if (seg5 < phba->ktime_seg5_min)
                phba->ktime_seg5_min = seg5;
        else if (seg5 > phba->ktime_seg5_max)
                phba->ktime_seg5_max = seg5;

        phba->ktime_data_samples++;
        if (!seg6)
                goto out;

        phba->ktime_seg6_total += seg6;
        if (seg6 < phba->ktime_seg6_min)
                phba->ktime_seg6_min = seg6;
        else if (seg6 > phba->ktime_seg6_max)
                phba->ktime_seg6_max = seg6;

        phba->ktime_seg7_total += seg7;
        if (seg7 < phba->ktime_seg7_min)
                phba->ktime_seg7_min = seg7;
        else if (seg7 > phba->ktime_seg7_max)
                phba->ktime_seg7_max = seg7;

        phba->ktime_seg8_total += seg8;
        if (seg8 < phba->ktime_seg8_min)
                phba->ktime_seg8_min = seg8;
        else if (seg8 > phba->ktime_seg8_max)
                phba->ktime_seg8_max = seg8;

        phba->ktime_seg9_total += seg9;
        if (seg9 < phba->ktime_seg9_min)
                phba->ktime_seg9_min = seg9;
        else if (seg9 > phba->ktime_seg9_max)
                phba->ktime_seg9_max = seg9;
out:
        phba->ktime_seg10_total += seg10;
        if (seg10 < phba->ktime_seg10_min)
                phba->ktime_seg10_min = seg10;
        else if (seg10 > phba->ktime_seg10_max)
                phba->ktime_seg10_max = seg10;
        phba->ktime_status_samples++;
}
#endif
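
/*
 * The ktime_seg* running totals and min/max values accumulated above are
 * presumably consumed by the driver's debugfs statistics (see
 * lpfc_debugfs.c); they play no role in the IO fast path itself.
 */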

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_fcp_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctxp = cmdwqe->context2;
        ctxp->flag &= ~LPFC_NVMET_IO_INP;

        rsp = &ctxp->ctx.fcp_req;
        op = rsp->op;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        if (phba->targetport)
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        else
                tgtp = NULL;

        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
                         ctxp->oxid, op, status);

        if (status) {
                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
                rsp->transferred_length = 0;
                if (tgtp) {
                        atomic_inc(&tgtp->xmt_fcp_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
                }

                logerr = LOG_NVME_IOERR;

                /* pick up SLI4 exchange busy condition */
                if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
                        ctxp->flag |= LPFC_NVMET_XBUSY;
                        logerr |= LOG_NVME_ABTS;
                        if (tgtp)
                                atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

                } else {
                        ctxp->flag &= ~LPFC_NVMET_XBUSY;
                }

                lpfc_printf_log(phba, KERN_INFO, logerr,
                                "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
                                "XBUSY:x%x\n",
                                ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
                                status, result, ctxp->flag);

        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
                if (op == NVMET_FCOP_RSP)
                        rsp->transferred_length = rsp->rsplen;
                else
                        rsp->transferred_length = rsp->transfer_length;
                if (tgtp)
                        atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
        }

        if ((op == NVMET_FCOP_READDATA_RSP) ||
            (op == NVMET_FCOP_RSP)) {
                /* Sanity check */
                ctxp->state = LPFC_NVMET_STE_DONE;
                ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                                ctxp->ts_isr_data =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_data_nvme =
                                        ktime_get_ns();
                                ctxp->ts_nvme_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_wqput =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_isr_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_nvme =
                                        ctxp->ts_data_nvme;
                        } else {
                                ctxp->ts_isr_status =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_status_nvme =
                                        ktime_get_ns();
                        }
                }
#endif
                rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme)
                        lpfc_nvmet_ktime(phba, ctxp);
#endif
                /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
                        ctxp->ts_data_nvme = ktime_get_ns();
                }
#endif
                rsp->done(rsp);
        }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                id = raw_smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                                "6704 CPU Check cmdcmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
                }
        }
#endif
}

static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_ls_req *rsp)
{
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct hbq_dmabuf *nvmebuf =
                (struct hbq_dmabuf *)ctxp->rqb_buffer;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
        struct lpfc_dmabuf dmabuf;
        struct ulp_bde64 bpl;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

        if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
            (ctxp->entry_cnt != 1)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6412 NVMET LS rsp state mismatch "
                                "oxid x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }
        ctxp->state = LPFC_NVMET_STE_LS_RSP;
        ctxp->entry_cnt++;

        nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
                                      rsp->rsplen);
        if (nvmewqeq == NULL) {
                atomic_inc(&nvmep->xmt_ls_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                atomic_inc(&nvmep->xmt_ls_abort);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
        }

        /* Save numBdes for bpl2sgl */
        nvmewqeq->rsvd2 = 1;
        nvmewqeq->hba_wqidx = 0;
        nvmewqeq->context3 = &dmabuf;
        dmabuf.virt = &bpl;
        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
        bpl.tus.f.bdeSize = rsp->rsplen;
        bpl.tus.f.bdeFlags = 0;
        bpl.tus.w = le32_to_cpu(bpl.tus.w);

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;

        lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
                         ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
        if (rc == WQE_SUCCESS) {
                /*
                 * Okay to repost buffer here, but wait till cmpl
                 * before freeing ctxp and iocbq.
                 */
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }
        /* Give back resources */
        atomic_inc(&nvmep->xmt_ls_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6151 LS Drop IO x%x: Issue %d\n",
                        ctxp->oxid, rc);

        lpfc_nlp_put(nvmewqeq->context1);

        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        atomic_inc(&nvmep->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_sli_ring *pring;
        unsigned long iflags;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING) {
                rc = -ENODEV;
                goto aerr;
        }

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (ctxp->ts_cmd_nvme) {
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_nvme_status = ktime_get_ns();
                else
                        ctxp->ts_nvme_data = ktime_get_ns();
        }

        /* Setup the hdw queue if not already set */
        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                int id = raw_smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (rsp->hwqid != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                                "6705 CPU Check OP: "
                                                "cpu %d expect %d\n",
                                                id, rsp->hwqid);
                        phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
                }
                ctxp->cpu = id; /* Setup cpu for cmpl check */
        }
#endif

        /* Sanity check */
        if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
            (ctxp->state == LPFC_NVMET_STE_ABORT)) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 IO oxid x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
        if (nvmewqeq == NULL) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;
        nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;

        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);

        ctxp->flag |= LPFC_NVMET_IO_INP;
        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
        if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!ctxp->ts_cmd_nvme)
                        return 0;
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_status_wqput = ktime_get_ns();
                else
                        ctxp->ts_data_wqput = ktime_get_ns();
#endif
                return 0;
        }

        if (rc == -EBUSY) {
                /*
                 * WQ was full, so queue nvmewqeq to be sent after
                 * WQE release CQE
                 */
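                /*
                 * The parked WQE is re-issued once the WQ drains and is
                 * flushed by lpfc_nvmet_wqfull_flush() if the exchange is
                 * aborted before it can be sent; returning 0 tells the
                 * NVMET transport the op was accepted.
                 */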
                ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
                wq = ctxp->hdwq->io_wq;
                pring = wq->pring;
                spin_lock_irqsave(&pring->ring_lock, iflags);
                list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
                wq->q_flag |= HBA_NVMET_WQFULL;
                spin_unlock_irqrestore(&pring->ring_lock, iflags);
                atomic_inc(&lpfc_nvmep->defer_wqfull);
                return 0;
        }

        /* Give back resources */
        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6153 FCP Drop IO x%x: Issue: %d\n",
                        ctxp->oxid, rc);

        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
        rc = -EBUSY;
aerr:
        return rc;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct lpfc_nvmet_tgtport *tport = targetport->private;

        /* release any threads waiting for the unreg to complete */
        if (tport->phba->targetport)
                complete(tport->tport_unreg_cmp);
}

static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                         struct nvmefc_tgt_fcp_req *req)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        unsigned long flags;

        if (phba->pport->load_flag & FC_UNLOADING)
                return;

        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[0];

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
                        ctxp->oxid, ctxp->flag, ctxp->state);

        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
                         ctxp->oxid, ctxp->flag, ctxp->state);

        atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

        spin_lock_irqsave(&ctxp->ctxlock, flags);

        /* Since iaab/iaar are NOT set, we need to check
         * if the firmware is in process of aborting IO
         */
        if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                return;
        }
        ctxp->flag |= LPFC_NVMET_ABORT_OP;

        if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
                wq = ctxp->hdwq->io_wq;
                lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
                return;
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        /* A state of LPFC_NVMET_STE_RCV means we have just received
         * the NVME command and have not yet started processing it
         * (no IO WQEs have been issued on this exchange yet).
         */
        if (ctxp->state == LPFC_NVMET_STE_RCV)
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
        else
                lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                               ctxp->oxid);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long flags;
        bool aborting = false;

        spin_lock_irqsave(&ctxp->ctxlock, flags);
        if (ctxp->flag & LPFC_NVMET_XBUSY)
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6027 NVMET release with XBUSY flag x%x"
                                " oxid x%x\n",
                                ctxp->flag, ctxp->oxid);
        else if (ctxp->state != LPFC_NVMET_STE_DONE &&
                 ctxp->state != LPFC_NVMET_STE_ABORT)
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6413 NVMET release bad state %d %d oxid x%x\n",
                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);

        if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
            (ctxp->flag & LPFC_NVMET_XBUSY)) {
                aborting = true;
                /* let the abort path do the real release */
                lpfc_nvmet_defer_release(phba, ctxp);
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
                         ctxp->state, aborting);

        atomic_inc(&lpfc_nvmep->xmt_fcp_release);
        ctxp->flag &= ~LPFC_NVMET_TNOTIFY;

        if (aborting)
                return;

        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
                     struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long iflag;

        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
                         ctxp->oxid, ctxp->size, raw_smp_processor_id());

        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6425 Defer rcv: no buffer oxid x%x: "
                                "flg %x ste %x\n",
                                ctxp->oxid, ctxp->flag, ctxp->state);
                return;
        }

        tgtp = phba->targetport->private;
        if (tgtp)
                atomic_inc(&tgtp->rcv_fcp_cmd_defer);

        /* Free the nvmebuf since a new buffer already replaced it */
        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
        spin_lock_irqsave(&ctxp->ctxlock, iflag);
        ctxp->rqb_buffer = NULL;
        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_hba *phba;
        uint32_t rc;

        tgtp = tgtport->private;
        phba = tgtp->phba;

        rc = lpfc_issue_els_rscn(phba->pport, 0);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                        "6420 NVMET subsystem change: Notification %s\n",
                        (rc) ? "Failed" : "Sent");
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
        .defer_rcv      = lpfc_nvmet_defer_rcv,
        .discovery_event = lpfc_nvmet_discovery_event,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .dma_boundary = 0xFFFFFFFF,

        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
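
/*
 * Note: several of the template values above (max_hw_queues, the SGL
 * segment counts) are only defaults; the targetport registration path in
 * this driver is expected to adjust them from the HBA configuration
 * before calling nvmet_fc_register_targetport().
 */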

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
                struct lpfc_nvmet_ctx_info *infop)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
        unsigned long flags;

        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
                                &infop->nvmet_ctx_list, list) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_del_init(&ctx_buf->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

                __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
                ctx_buf->sglq->state = SGL_FREED;
                ctx_buf->sglq->ndlp = NULL;

                spin_lock(&phba->sli4_hba.sgl_list_lock);
                list_add_tail(&ctx_buf->sglq->list,
                                &phba->sli4_hba.lpfc_nvmet_sgl_list);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);

                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                kfree(ctx_buf->context);
        }
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctx_info *infop;
        int i, j;

        /* The first context list, MRQ 0 CPU 0 */
        infop = phba->sli4_hba.nvmet_ctx_info;
        if (!infop)
                return;

        /* Cycle through the entire CPU context list for every MRQ */
        for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
                for_each_present_cpu(j) {
                        infop = lpfc_get_ctx_list(phba, j, i);
                        __lpfc_nvmet_clean_io_for_cpu(phba, infop);
                }
        }
        kfree(phba->sli4_hba.nvmet_ctx_info);
        phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe128 *wqe;
        struct lpfc_nvmet_ctx_info *last_infop;
        struct lpfc_nvmet_ctx_info *infop;
        int i, j, idx, cpu;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6403 Allocate NVMET resources for %d XRIs\n",
                        phba->sli4_hba.nvmet_xri_cnt);

        phba->sli4_hba.nvmet_ctx_info = kcalloc(
                phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
                sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
        if (!phba->sli4_hba.nvmet_ctx_info) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6419 Failed allocate memory for "
                                "nvmet context lists\n");
                return -ENOMEM;
        }

        /*
         * Assuming X CPUs in the system, and Y MRQs, allocate some
         * lpfc_nvmet_ctx_info structures as follows:
         *
         * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
         * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
         * ...
         * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
         *
         * Each line represents an MRQ "silo" containing an entry for
         * every CPU.
         *
         * MRQ X is initially assumed to be associated with CPU X, thus
         * contexts are initially distributed across all MRQs using
         * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
         * freed, they are freed to the MRQ silo based on the CPU number
         * of the IO completion. Thus a context that was allocated for MRQ A
         * whose IO completed on CPU B will be freed to cpuB/mrqA.
         */
1324         for_each_possible_cpu(i) {
1325                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1326                         infop = lpfc_get_ctx_list(phba, i, j);
1327                         INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1328                         spin_lock_init(&infop->nvmet_ctx_list_lock);
1329                         infop->nvmet_ctx_list_cnt = 0;
1330                 }
1331         }
1332
1333         /*
1334          * Setup the next CPU context info ptr for each MRQ.
1335          * MRQ 0 will cycle thru CPUs 0 - X separately from
1336          * MRQ 1 cycling thru CPUs 0 - X, and so on.
1337          */
1338         for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1339                 last_infop = lpfc_get_ctx_list(phba,
1340                                                cpumask_first(cpu_present_mask),
1341                                                j);
1342                 for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
1343                         infop = lpfc_get_ctx_list(phba, i, j);
1344                         infop->nvmet_ctx_next_cpu = last_infop;
1345                         last_infop = infop;
1346                 }
1347         }
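        /*
         * The loop above links each MRQ's per-CPU entries into a ring:
         * cpu0 -> cpu1 -> ... -> cpu(X-1) -> back to the first present
         * CPU. lpfc_nvmet_replenish_context() relies on this so it can
         * walk every CPU's list for a given MRQ starting from any entry.
         */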
1348
1349         /* For all nvmet xris, allocate resources needed to process a
1350          * received command on a per xri basis.
1351          */
1352         idx = 0;
1353         cpu = cpumask_first(cpu_present_mask);
1354         for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1355                 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1356                 if (!ctx_buf) {
1357                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1358                                         "6404 Ran out of memory for NVMET\n");
1359                         return -ENOMEM;
1360                 }
1361
1362                 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1363                                            GFP_KERNEL);
1364                 if (!ctx_buf->context) {
1365                         kfree(ctx_buf);
1366                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1367                                         "6405 Ran out of NVMET "
1368                                         "context memory\n");
1369                         return -ENOMEM;
1370                 }
1371                 ctx_buf->context->ctxbuf = ctx_buf;
1372                 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1373
1374                 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1375                 if (!ctx_buf->iocbq) {
1376                         kfree(ctx_buf->context);
1377                         kfree(ctx_buf);
1378                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1379                                         "6406 Ran out of NVMET iocb/WQEs\n");
1380                         return -ENOMEM;
1381                 }
1382                 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1383                 nvmewqe = ctx_buf->iocbq;
1384                 wqe = &nvmewqe->wqe;
1385
1386                 /* Initialize WQE */
1387                 memset(wqe, 0, sizeof(union lpfc_wqe));
1388
1389                 ctx_buf->iocbq->context1 = NULL;
1390                 spin_lock(&phba->sli4_hba.sgl_list_lock);
1391                 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1392                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1393                 if (!ctx_buf->sglq) {
1394                         lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1395                         kfree(ctx_buf->context);
1396                         kfree(ctx_buf);
1397                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1398                                         "6407 Ran out of NVMET XRIs\n");
1399                         return -ENOMEM;
1400                 }
1401                 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1402
1403                 /*
1404                  * Add ctx to MRQidx context list. Our initial assumption
1405                  * is MRQidx will be associated with CPUidx. This association
1406                  * can change on the fly.
1407                  */
1408                 infop = lpfc_get_ctx_list(phba, cpu, idx);
1409                 spin_lock(&infop->nvmet_ctx_list_lock);
1410                 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1411                 infop->nvmet_ctx_list_cnt++;
1412                 spin_unlock(&infop->nvmet_ctx_list_lock);
1413
1414                 /* Spread ctx structures evenly across all MRQs */
1415                 idx++;
1416                 if (idx >= phba->cfg_nvmet_mrq) {
1417                         idx = 0;
1418                         cpu = cpumask_first(cpu_present_mask);
1419                         continue;
1420                 }
1421                 cpu = cpumask_next(cpu, cpu_present_mask);
1422                 if (cpu == nr_cpu_ids)
1423                         cpu = cpumask_first(cpu_present_mask);
1424
1425         }
1426
1427         for_each_present_cpu(i) {
1428                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1429                         infop = lpfc_get_ctx_list(phba, i, j);
1430                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1431                                         "6408 TOTAL NVMET ctx for CPU %d "
1432                                         "MRQ %d: cnt %d nextcpu x%px\n",
1433                                         i, j, infop->nvmet_ctx_list_cnt,
1434                                         infop->nvmet_ctx_next_cpu);
1435                 }
1436         }
1437         return 0;
1438 }
1439
1440 int
1441 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1442 {
1443         struct lpfc_vport  *vport = phba->pport;
1444         struct lpfc_nvmet_tgtport *tgtp;
1445         struct nvmet_fc_port_info pinfo;
1446         int error;
1447
1448         if (phba->targetport)
1449                 return 0;
1450
1451         error = lpfc_nvmet_setup_io_context(phba);
1452         if (error)
1453                 return error;
1454
1455         memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1456         pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1457         pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1458         pinfo.port_id = vport->fc_myDID;
1459
1460         /* We need to tell the transport layer + 1 because it takes page
1461          * alignment into account. When space for the SGL is allocated we
1462          * allocate + 3: one for the cmd, one for the rsp, and one for the
1463          * page alignment. */
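        /* For example (illustrative numbers only): with a cfg_nvme_seg_cnt
         * of 64 the transport is told max_sgl_segments = 65, while the
         * driver-side SGL allocation reserves 64 + 3 entries as noted
         * above.
         */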
1464         lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1465         lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1466         lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1467
1468 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1469         error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1470                                              &phba->pcidev->dev,
1471                                              &phba->targetport);
1472 #else
1473         error = -ENOENT;
1474 #endif
1475         if (error) {
1476                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1477                                 "6025 Cannot register NVME targetport x%x: "
1478                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1479                                 error,
1480                                 pinfo.port_name, pinfo.node_name,
1481                                 lpfc_tgttemplate.max_sgl_segments,
1482                                 lpfc_tgttemplate.max_hw_queues);
1483                 phba->targetport = NULL;
1484                 phba->nvmet_support = 0;
1485
1486                 lpfc_nvmet_cleanup_io_context(phba);
1487
1488         } else {
1489                 tgtp = (struct lpfc_nvmet_tgtport *)
1490                         phba->targetport->private;
1491                 tgtp->phba = phba;
1492
1493                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1494                                 "6026 Registered NVME "
1495                                 "targetport: x%px, private x%px "
1496                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1497                                 phba->targetport, tgtp,
1498                                 pinfo.port_name, pinfo.node_name,
1499                                 lpfc_tgttemplate.max_sgl_segments,
1500                                 lpfc_tgttemplate.max_hw_queues);
1501
1502                 atomic_set(&tgtp->rcv_ls_req_in, 0);
1503                 atomic_set(&tgtp->rcv_ls_req_out, 0);
1504                 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1505                 atomic_set(&tgtp->xmt_ls_abort, 0);
1506                 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1507                 atomic_set(&tgtp->xmt_ls_rsp, 0);
1508                 atomic_set(&tgtp->xmt_ls_drop, 0);
1509                 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1510                 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1511                 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1512                 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1513                 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1514                 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1515                 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1516                 atomic_set(&tgtp->xmt_fcp_drop, 0);
1517                 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1518                 atomic_set(&tgtp->xmt_fcp_read, 0);
1519                 atomic_set(&tgtp->xmt_fcp_write, 0);
1520                 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1521                 atomic_set(&tgtp->xmt_fcp_release, 0);
1522                 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1523                 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1524                 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1525                 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1526                 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1527                 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1528                 atomic_set(&tgtp->xmt_fcp_abort, 0);
1529                 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1530                 atomic_set(&tgtp->xmt_abort_unsol, 0);
1531                 atomic_set(&tgtp->xmt_abort_sol, 0);
1532                 atomic_set(&tgtp->xmt_abort_rsp, 0);
1533                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1534                 atomic_set(&tgtp->defer_ctx, 0);
1535                 atomic_set(&tgtp->defer_fod, 0);
1536                 atomic_set(&tgtp->defer_wqfull, 0);
1537         }
1538         return error;
1539 }
1540
1541 int
1542 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1543 {
1544         struct lpfc_vport  *vport = phba->pport;
1545
1546         if (!phba->targetport)
1547                 return 0;
1548
1549         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1550                          "6007 Update NVMET port x%px did x%x\n",
1551                          phba->targetport, vport->fc_myDID);
1552
1553         phba->targetport->port_id = vport->fc_myDID;
1554         return 0;
1555 }
1556
1557 /**
1558  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1559  * @phba: pointer to lpfc hba data structure.
1560  * @axri: pointer to the nvmet xri abort wcqe structure.
1561  *
1562  * This routine is invoked by the worker thread to process a SLI4 fast-path
1563  * NVMET aborted xri.
1564  **/
1565 void
1566 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1567                             struct sli4_wcqe_xri_aborted *axri)
1568 {
1569 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1570         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1571         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1572         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1573         struct lpfc_nvmet_tgtport *tgtp;
1574         struct nvmefc_tgt_fcp_req *req = NULL;
1575         struct lpfc_nodelist *ndlp;
1576         unsigned long iflag = 0;
1577         int rrq_empty = 0;
1578         bool released = false;
1579
1580         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1581                         "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1582
1583         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1584                 return;
1585
1586         if (phba->targetport) {
1587                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1588                 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1589         }
1590
1591         spin_lock_irqsave(&phba->hbalock, iflag);
1592         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
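        /* Lock hierarchy in this walk: hbalock -> abts_nvmet_buf_list_lock
         * -> ctxp->ctxlock; they are released innermost-first below.
         */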
1593         list_for_each_entry_safe(ctxp, next_ctxp,
1594                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1595                                  list) {
1596                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1597                         continue;
1598
1599                 spin_lock(&ctxp->ctxlock);
1600                 /* Check if we already received a free context call
1601                  * and we have completed processing an abort situation.
1602                  */
1603                 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1604                     !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1605                         list_del_init(&ctxp->list);
1606                         released = true;
1607                 }
1608                 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1609                 spin_unlock(&ctxp->ctxlock);
1610                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1611
1612                 rrq_empty = list_empty(&phba->active_rrq_list);
1613                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1614                 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1615                 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1616                     (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1617                      ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1618                         lpfc_set_rrq_active(phba, ndlp,
1619                                 ctxp->ctxbuf->sglq->sli4_lxritag,
1620                                 rxid, 1);
1621                         lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1622                 }
1623
1624                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1625                                 "6318 XB aborted oxid x%x flg x%x (%x)\n",
1626                                 ctxp->oxid, ctxp->flag, released);
1627                 if (released)
1628                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1629
1630                 if (rrq_empty)
1631                         lpfc_worker_wake_up(phba);
1632                 return;
1633         }
1634         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1635         spin_unlock_irqrestore(&phba->hbalock, iflag);
1636
1637         ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1638         if (ctxp) {
1639                 /*
1640                  *  Abort already done by FW, so BA_ACC sent.
1641                  *  However, the transport may be unaware.
1642                  */
1643                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1644                                 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1645                                 "flag x%x oxid x%x rxid x%x\n",
1646                                 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1647                                 rxid);
1648
1649                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1650                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1651                 ctxp->state = LPFC_NVMET_STE_ABORT;
1652                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1653
1654                 lpfc_nvmeio_data(phba,
1655                                  "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1656                                  xri, raw_smp_processor_id(), 0);
1657
1658                 req = &ctxp->ctx.fcp_req;
1659                 if (req)
1660                         nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1661         }
1662 #endif
1663 }
1664
1665 int
1666 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1667                            struct fc_frame_header *fc_hdr)
1668 {
1669 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1670         struct lpfc_hba *phba = vport->phba;
1671         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1672         struct nvmefc_tgt_fcp_req *rsp;
1673         uint32_t sid;
1674         uint16_t oxid, xri;
1675         unsigned long iflag = 0;
1676
1677         sid = sli4_sid_from_fc_hdr(fc_hdr);
1678         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1679
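        /* Search for the exchange in three places, in order: the ABTS
         * (XBUSY) context list, the io_wait list of commands still
         * waiting for a free context, and finally the active exchange
         * list via lpfc_nvmet_get_ctx_for_oxid().
         */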
1680         spin_lock_irqsave(&phba->hbalock, iflag);
1681         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1682         list_for_each_entry_safe(ctxp, next_ctxp,
1683                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1684                                  list) {
1685                 if (ctxp->oxid != oxid || ctxp->sid != sid)
1686                         continue;
1687
1688                 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1689
1690                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1691                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1692
1693                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1694                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1695                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1696
1697                 lpfc_nvmeio_data(phba,
1698                         "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1699                         xri, raw_smp_processor_id(), 0);
1700
1701                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1702                                 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1703
1704                 rsp = &ctxp->ctx.fcp_req;
1705                 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1706
1707                 /* Respond with BA_ACC accordingly */
1708                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1709                 return 0;
1710         }
1711         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1712         spin_unlock_irqrestore(&phba->hbalock, iflag);
1713
1714         /* check the wait list */
1715         if (phba->sli4_hba.nvmet_io_wait_cnt) {
1716                 struct rqb_dmabuf *nvmebuf;
1717                 struct fc_frame_header *fc_hdr_tmp;
1718                 u32 sid_tmp;
1719                 u16 oxid_tmp;
1720                 bool found = false;
1721
1722                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1723
1724                 /* match by oxid and s_id */
1725                 list_for_each_entry(nvmebuf,
1726                                     &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1727                                     hbuf.list) {
1728                         fc_hdr_tmp = (struct fc_frame_header *)
1729                                         (nvmebuf->hbuf.virt);
1730                         oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1731                         sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1732                         if (oxid_tmp != oxid || sid_tmp != sid)
1733                                 continue;
1734
1735                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1736                                         "6321 NVMET Rcv ABTS oxid x%x from x%x "
1737                                         "is waiting for a ctxp\n",
1738                                         oxid, sid);
1739
1740                         list_del_init(&nvmebuf->hbuf.list);
1741                         phba->sli4_hba.nvmet_io_wait_cnt--;
1742                         found = true;
1743                         break;
1744                 }
1745                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1746                                        iflag);
1747
1748                 /* free buffer since already posted a new DMA buffer to RQ */
1749                 if (found) {
1750                         nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1751                         /* Respond with BA_ACC accordingly */
1752                         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1753                         return 0;
1754                 }
1755         }
1756
1757         /* check active list */
1758         ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1759         if (ctxp) {
1760                 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1761
1762                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1763                 ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
1764                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1765
1766                 lpfc_nvmeio_data(phba,
1767                                  "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1768                                  xri, raw_smp_processor_id(), 0);
1769
1770                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1771                                 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1772                                 "flag x%x state x%x\n",
1773                                 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1774
1775                 if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
1776                         /* Notify the transport */
1777                         nvmet_fc_rcv_fcp_abort(phba->targetport,
1778                                                &ctxp->ctx.fcp_req);
1779                 } else {
1780                         cancel_work_sync(&ctxp->ctxbuf->defer_work);
1781                         spin_lock_irqsave(&ctxp->ctxlock, iflag);
1782                         lpfc_nvmet_defer_release(phba, ctxp);
1783                         spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1784                 }
1785                 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1786                                                ctxp->oxid);
1787
1788                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1789                 return 0;
1790         }
1791
1792         lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1793                          oxid, raw_smp_processor_id(), 1);
1794
1795         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1796                         "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1797
1798         /* Respond with BA_RJT accordingly */
1799         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1800 #endif
1801         return 0;
1802 }
1803
1804 static void
1805 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1806                         struct lpfc_nvmet_rcv_ctx *ctxp)
1807 {
1808         struct lpfc_sli_ring *pring;
1809         struct lpfc_iocbq *nvmewqeq;
1810         struct lpfc_iocbq *next_nvmewqeq;
1811         unsigned long iflags;
1812         struct lpfc_wcqe_complete wcqe;
1813         struct lpfc_wcqe_complete *wcqep;
1814
1815         pring = wq->pring;
1816         wcqep = &wcqe;
1817
1818         /* Fake an ABORT error code back to cmpl routine */
1819         memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1820         bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1821         wcqep->parameter = IOERR_ABORT_REQUESTED;
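        /* Each WQE flushed below is completed through the normal
         * lpfc_nvmet_xmt_fcp_op_cmp() path with this faked WCQE, so the
         * nvmet transport sees the IO fail as an aborted request.
         */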
1822
1823         spin_lock_irqsave(&pring->ring_lock, iflags);
1824         list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1825                                  &wq->wqfull_list, list) {
1826                 if (ctxp) {
1827                         /* Checking for a specific IO to flush */
1828                         if (nvmewqeq->context2 == ctxp) {
1829                                 list_del(&nvmewqeq->list);
1830                                 spin_unlock_irqrestore(&pring->ring_lock,
1831                                                        iflags);
1832                                 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1833                                                           wcqep);
1834                                 return;
1835                         }
1836                         continue;
1837                 } else {
1838                         /* Flush all IOs */
1839                         list_del(&nvmewqeq->list);
1840                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1841                         lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1842                         spin_lock_irqsave(&pring->ring_lock, iflags);
1843                 }
1844         }
1845         if (!ctxp)
1846                 wq->q_flag &= ~HBA_NVMET_WQFULL;
1847         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1848 }
1849
1850 void
1851 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1852                           struct lpfc_queue *wq)
1853 {
1854 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1855         struct lpfc_sli_ring *pring;
1856         struct lpfc_iocbq *nvmewqeq;
1857         struct lpfc_nvmet_rcv_ctx *ctxp;
1858         unsigned long iflags;
1859         int rc;
1860
1861         /*
1862          * Some WQE slots are available, so try to re-issue anything
1863          * on the WQ wqfull_list.
1864          */
1865         pring = wq->pring;
1866         spin_lock_irqsave(&pring->ring_lock, iflags);
1867         while (!list_empty(&wq->wqfull_list)) {
1868                 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1869                                  list);
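                /* Drop the ring lock while re-issuing the WQE; it is
                 * retaken before examining the result so wqfull_list
                 * manipulation stays serialized.
                 */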
1870                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1871                 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
1872                 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1873                 spin_lock_irqsave(&pring->ring_lock, iflags);
1874                 if (rc == -EBUSY) {
1875                         /* WQ was full again, so put it back on the list */
1876                         list_add(&nvmewqeq->list, &wq->wqfull_list);
1877                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1878                         return;
1879                 }
1880                 if (rc == WQE_SUCCESS) {
1881 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1882                         if (ctxp->ts_cmd_nvme) {
1883                                 if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
1884                                         ctxp->ts_status_wqput = ktime_get_ns();
1885                                 else
1886                                         ctxp->ts_data_wqput = ktime_get_ns();
1887                         }
1888 #endif
1889                 } else {
1890                         WARN_ON(rc);
1891                 }
1892         }
1893         wq->q_flag &= ~HBA_NVMET_WQFULL;
1894         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1895
1896 #endif
1897 }
1898
1899 void
1900 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1901 {
1902 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1903         struct lpfc_nvmet_tgtport *tgtp;
1904         struct lpfc_queue *wq;
1905         uint32_t qidx;
1906         DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1907
1908         if (phba->nvmet_support == 0)
1909                 return;
1910         if (phba->targetport) {
1911                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1912                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1913                         wq = phba->sli4_hba.hdwq[qidx].io_wq;
1914                         lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1915                 }
1916                 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1917                 nvmet_fc_unregister_targetport(phba->targetport);
1918                 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
1919                                         msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1920                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1921                                         "6179 Unreg targetport x%px timeout "
1922                                         "reached.\n", phba->targetport);
1923                 lpfc_nvmet_cleanup_io_context(phba);
1924         }
1925         phba->targetport = NULL;
1926 #endif
1927 }
1928
1929 /**
1930  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1931  * @phba: pointer to lpfc hba data structure.
1932  * @pring: pointer to a SLI ring.
1933  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1934  *
1935  * This routine processes an unsolicited NVME LS request received on
1936  * an RQ. It allocates a receive context for the exchange and passes
1937  * the LS payload to the nvmet transport via nvmet_fc_rcv_ls_req().
1938  * If no context can be allocated, or the transport rejects the
1939  * request, the receive buffer is freed and an abort is issued for
1940  * the exchange.
1941  **/
1942 static void
1943 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1944                            struct hbq_dmabuf *nvmebuf)
1945 {
1946 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1947         struct lpfc_nvmet_tgtport *tgtp;
1948         struct fc_frame_header *fc_hdr;
1949         struct lpfc_nvmet_rcv_ctx *ctxp;
1950         uint32_t *payload;
1951         uint32_t size, oxid, sid, rc;
1952
1953
1954         if (!nvmebuf || !phba->targetport) {
1955                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1956                                 "6154 LS Drop IO\n");
1957                 oxid = 0;
1958                 size = 0;
1959                 sid = 0;
1960                 ctxp = NULL;
1961                 goto dropit;
1962         }
1963
1964         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1965         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1966
1967         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1968         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1969         size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1970         sid = sli4_sid_from_fc_hdr(fc_hdr);
1971
1972         ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1973         if (ctxp == NULL) {
1974                 atomic_inc(&tgtp->rcv_ls_req_drop);
1975                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1976                                 "6155 LS Drop IO x%x: Alloc\n",
1977                                 oxid);
1978 dropit:
1979                 lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1980                                  "xri x%x sz %d from %06x\n",
1981                                  oxid, size, sid);
1982                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1983                 return;
1984         }
1985         ctxp->phba = phba;
1986         ctxp->size = size;
1987         ctxp->oxid = oxid;
1988         ctxp->sid = sid;
1989         ctxp->wqeq = NULL;
1990         ctxp->state = LPFC_NVMET_STE_LS_RCV;
1991         ctxp->entry_cnt = 1;
1992         ctxp->rqb_buffer = (void *)nvmebuf;
1993         ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1994
1995         lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1996                          oxid, size, sid);
1997         /*
1998          * The calling sequence should be:
1999          * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
2000          * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
2001          */
2002         atomic_inc(&tgtp->rcv_ls_req_in);
2003         rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
2004                                  payload, size);
2005
2006         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2007                         "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2008                         "%08x %08x %08x\n", size, rc,
2009                         *payload, *(payload+1), *(payload+2),
2010                         *(payload+3), *(payload+4), *(payload+5));
2011
2012         if (rc == 0) {
2013                 atomic_inc(&tgtp->rcv_ls_req_out);
2014                 return;
2015         }
2016
2017         lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
2018                          oxid, size, sid);
2019
2020         atomic_inc(&tgtp->rcv_ls_req_drop);
2021         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2022                         "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
2023                         ctxp->oxid, rc);
2024
2025         /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
2026         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2027
2028         atomic_inc(&tgtp->xmt_ls_abort);
2029         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
2030 #endif
2031 }
2032
2033 static void
2034 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2035 {
2036 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2037         struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
2038         struct lpfc_hba *phba = ctxp->phba;
2039         struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2040         struct lpfc_nvmet_tgtport *tgtp;
2041         uint32_t *payload, qno;
2042         uint32_t rc;
2043         unsigned long iflags;
2044
2045         if (!nvmebuf) {
2046                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2047                         "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2048                         "oxid: x%x flg: x%x state: x%x\n",
2049                         ctxp->oxid, ctxp->flag, ctxp->state);
2050                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2051                 lpfc_nvmet_defer_release(phba, ctxp);
2052                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2053                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2054                                                  ctxp->oxid);
2055                 return;
2056         }
2057
2058         if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
2059                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2060                                 "6324 IO oxid x%x aborted\n",
2061                                 ctxp->oxid);
2062                 return;
2063         }
2064
2065         payload = (uint32_t *)(nvmebuf->dbuf.virt);
2066         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2067         ctxp->flag |= LPFC_NVMET_TNOTIFY;
2068 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2069         if (ctxp->ts_isr_cmd)
2070                 ctxp->ts_cmd_nvme = ktime_get_ns();
2071 #endif
2072         /*
2073          * The calling sequence should be:
2074          * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2075          * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2076          * When we return from nvmet_fc_rcv_fcp_req, all relevant info on
2077          * the NVME command / FC header is stored.
2078          * A buffer has already been reposted for this IO, so just free
2079          * the nvmebuf.
2080          */
2081         rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2082                                   payload, ctxp->size);
2083         /* Process FCP command */
2084         if (rc == 0) {
2085                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2086                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2087                 if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
2088                     (nvmebuf != ctxp->rqb_buffer)) {
2089                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2090                         return;
2091                 }
2092                 ctxp->rqb_buffer = NULL;
2093                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2094                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2095                 return;
2096         }
2097
2098         /* Processing of FCP command is deferred */
2099         if (rc == -EOVERFLOW) {
2100                 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2101                                  "from %06x\n",
2102                                  ctxp->oxid, ctxp->size, ctxp->sid);
2103                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2104                 atomic_inc(&tgtp->defer_fod);
2105                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2106                 if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
2107                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2108                         return;
2109                 }
2110                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2111                 /*
2112                  * Post a replacement DMA buffer to RQ and defer
2113                  * freeing rcv buffer till .defer_rcv callback
2114                  */
2115                 qno = nvmebuf->idx;
2116                 lpfc_post_rq_buffer(
2117                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2118                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2119                 return;
2120         }
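        /* Any other rc means the transport rejected the command: clear
         * the notify flag, account the drop, and abort the exchange.
         */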
2121         ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
2122         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2123         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2124                         "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2125                         ctxp->oxid, rc,
2126                         atomic_read(&tgtp->rcv_fcp_cmd_in),
2127                         atomic_read(&tgtp->rcv_fcp_cmd_out),
2128                         atomic_read(&tgtp->xmt_fcp_release));
2129         lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2130                          ctxp->oxid, ctxp->size, ctxp->sid);
2131         spin_lock_irqsave(&ctxp->ctxlock, iflags);
2132         lpfc_nvmet_defer_release(phba, ctxp);
2133         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2134         lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2135 #endif
2136 }
2137
2138 static void
2139 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2140 {
2141 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2142         struct lpfc_nvmet_ctxbuf *ctx_buf =
2143                 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2144
2145         lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2146 #endif
2147 }
2148
2149 static struct lpfc_nvmet_ctxbuf *
2150 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2151                              struct lpfc_nvmet_ctx_info *current_infop)
2152 {
2153 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2154         struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2155         struct lpfc_nvmet_ctx_info *get_infop;
2156         int i;
2157
2158         /*
2159          * The current_infop for the MRQ an NVME command IU was received
2160          * on is empty. Our goal is to replenish this MRQ's context
2161          * list from another CPU's list.
2162          *
2163          * First we need to pick a context list to start looking on.
2164          * nvmet_ctx_start_cpu had available context the last time
2165          * we needed to replenish this CPU, while nvmet_ctx_next_cpu
2166          * is just the next sequential CPU for this MRQ.
2167          */
2168         if (current_infop->nvmet_ctx_start_cpu)
2169                 get_infop = current_infop->nvmet_ctx_start_cpu;
2170         else
2171                 get_infop = current_infop->nvmet_ctx_next_cpu;
2172
2173         for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2174                 if (get_infop == current_infop) {
2175                         get_infop = get_infop->nvmet_ctx_next_cpu;
2176                         continue;
2177                 }
2178                 spin_lock(&get_infop->nvmet_ctx_list_lock);
2179
2180                 /* Just take the entire context list, if there are any */
2181                 if (get_infop->nvmet_ctx_list_cnt) {
2182                         list_splice_init(&get_infop->nvmet_ctx_list,
2183                                     &current_infop->nvmet_ctx_list);
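                        /* The donor's entire list was spliced over; one
                         * ctx_buf is popped below for the caller, hence
                         * the cnt - 1 accounting.
                         */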
2184                         current_infop->nvmet_ctx_list_cnt =
2185                                 get_infop->nvmet_ctx_list_cnt - 1;
2186                         get_infop->nvmet_ctx_list_cnt = 0;
2187                         spin_unlock(&get_infop->nvmet_ctx_list_lock);
2188
2189                         current_infop->nvmet_ctx_start_cpu = get_infop;
2190                         list_remove_head(&current_infop->nvmet_ctx_list,
2191                                          ctx_buf, struct lpfc_nvmet_ctxbuf,
2192                                          list);
2193                         return ctx_buf;
2194                 }
2195
2196                 /* Otherwise, move on to the next CPU for this MRQ */
2197                 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2198                 get_infop = get_infop->nvmet_ctx_next_cpu;
2199         }
2200
2201 #endif
2202         /* Nothing found, all contexts for the MRQ are in-flight */
2203         return NULL;
2204 }
2205
2206 /**
2207  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2208  * @phba: pointer to lpfc hba data structure.
2209  * @idx: relative index of MRQ vector
2210  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2211  * @isr_timestamp: interrupt service timestamp (ktime, nanoseconds).
2212  * @cqflag: cq processing information regarding workload.
2213  *
2214  * This routine processes an unsolicited NVME FCP command IU received
2215  * on an MRQ. It pulls a free receive context from the per-CPU list
2216  * for this MRQ (replenishing from another CPU's list if empty),
2217  * initializes the context from the FC header, and either processes
2218  * the command inline or defers it to a work queue based on CQ load.
2219  * If no context is available, the buffer is queued until one frees.
2220  **/
2221 static void
2222 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2223                             uint32_t idx,
2224                             struct rqb_dmabuf *nvmebuf,
2225                             uint64_t isr_timestamp,
2226                             uint8_t cqflag)
2227 {
2228         struct lpfc_nvmet_rcv_ctx *ctxp;
2229         struct lpfc_nvmet_tgtport *tgtp;
2230         struct fc_frame_header *fc_hdr;
2231         struct lpfc_nvmet_ctxbuf *ctx_buf;
2232         struct lpfc_nvmet_ctx_info *current_infop;
2233         uint32_t size, oxid, sid, qno;
2234         unsigned long iflag;
2235         int current_cpu;
2236
2237         if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2238                 return;
2239
2240         ctx_buf = NULL;
2241         if (!nvmebuf || !phba->targetport) {
2242                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2243                                 "6157 NVMET FCP Drop IO\n");
2244                 if (nvmebuf)
2245                         lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2246                 return;
2247         }
2248
2249         /*
2250          * Get a pointer to the context list for this MRQ based on
2251          * the CPU this MRQ IRQ is associated with. If the CPU association
2252          * changes from our initial assumption, the context list could
2253          * be empty, thus it would need to be replenished with the
2254          * context list from another CPU for this MRQ.
2255          */
2256         current_cpu = raw_smp_processor_id();
2257         current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2258         spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2259         if (current_infop->nvmet_ctx_list_cnt) {
2260                 list_remove_head(&current_infop->nvmet_ctx_list,
2261                                  ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2262                 current_infop->nvmet_ctx_list_cnt--;
2263         } else {
2264                 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2265         }
2266         spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2267
2268         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2269         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2270         size = nvmebuf->bytes_recv;
2271
2272 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2273         if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
2274                 if (current_cpu < LPFC_CHECK_CPU_CNT) {
2275                         if (idx != current_cpu)
2276                                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2277                                                 "6703 CPU Check rcv: "
2278                                                 "cpu %d expect %d\n",
2279                                                 current_cpu, idx);
2280                         phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
2281                 }
2282         }
2283 #endif
2284
2285         lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2286                          oxid, size, raw_smp_processor_id());
2287
2288         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2289
2290         if (!ctx_buf) {
2291                 /* Queue this NVME IO to process later */
2292                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2293                 list_add_tail(&nvmebuf->hbuf.list,
2294                               &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2295                 phba->sli4_hba.nvmet_io_wait_cnt++;
2296                 phba->sli4_hba.nvmet_io_wait_total++;
2297                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2298                                        iflag);
2299
2300                 /* Post a brand new DMA buffer to RQ */
2301                 qno = nvmebuf->idx;
2302                 lpfc_post_rq_buffer(
2303                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2304                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2305
2306                 atomic_inc(&tgtp->defer_ctx);
2307                 return;
2308         }
2309
2310         sid = sli4_sid_from_fc_hdr(fc_hdr);
2311
2312         ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2313         spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2314         list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2315         spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2316         if (ctxp->state != LPFC_NVMET_STE_FREE) {
2317                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2318                                 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2319                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2320         }
2321         ctxp->wqeq = NULL;
2322         ctxp->offset = 0;
2323         ctxp->phba = phba;
2324         ctxp->size = size;
2325         ctxp->oxid = oxid;
2326         ctxp->sid = sid;
2327         ctxp->idx = idx;
2328         ctxp->state = LPFC_NVMET_STE_RCV;
2329         ctxp->entry_cnt = 1;
2330         ctxp->flag = 0;
2331         ctxp->ctxbuf = ctx_buf;
2332         ctxp->rqb_buffer = (void *)nvmebuf;
2333         ctxp->hdwq = NULL;
2334         spin_lock_init(&ctxp->ctxlock);
2335
2336 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2337         if (isr_timestamp)
2338                 ctxp->ts_isr_cmd = isr_timestamp;
2339         ctxp->ts_cmd_nvme = 0;
2340         ctxp->ts_nvme_data = 0;
2341         ctxp->ts_data_wqput = 0;
2342         ctxp->ts_isr_data = 0;
2343         ctxp->ts_data_nvme = 0;
2344         ctxp->ts_nvme_status = 0;
2345         ctxp->ts_status_wqput = 0;
2346         ctxp->ts_isr_status = 0;
2347         ctxp->ts_status_nvme = 0;
2348 #endif
2349
2350         atomic_inc(&tgtp->rcv_fcp_cmd_in);
2351         /* check for cq processing load */
2352         if (!cqflag) {
2353                 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2354                 return;
2355         }
2356
2357         if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2358                 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2359                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2360                                 "6325 Unable to queue work for oxid x%x. "
2361                                 "FCP Drop IO [x%x x%x x%x]\n",
2362                                 ctxp->oxid,
2363                                 atomic_read(&tgtp->rcv_fcp_cmd_in),
2364                                 atomic_read(&tgtp->rcv_fcp_cmd_out),
2365                                 atomic_read(&tgtp->xmt_fcp_release));
2366
2367                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2368                 lpfc_nvmet_defer_release(phba, ctxp);
2369                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2370                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2371         }
2372 }
2373
2374 /**
2375  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2376  * @phba: pointer to lpfc hba data structure.
2377  * @pring: pointer to a SLI ring.
2378  * @piocb: pointer to the driver iocb carrying the received data buffer.
2379  *
2380  * This routine is used to process an unsolicited event received from a SLI
2381  * (Service Level Interface) ring. The actual processing of the data buffer
2382  * associated with the unsolicited event is done by invoking the routine
2383  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2384  * SLI RQ on which the unsolicited event was received.
2385  **/
2386 void
2387 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2388                           struct lpfc_iocbq *piocb)
2389 {
2390         struct lpfc_dmabuf *d_buf;
2391         struct hbq_dmabuf *nvmebuf;
2392
2393         d_buf = piocb->context2;
2394         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2395
2396         if (!nvmebuf) {
2397                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2398                                 "3015 LS Drop IO\n");
2399                 return;
2400         }
2401         if (phba->nvmet_support == 0) {
2402                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2403                 return;
2404         }
2405         lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2406 }
2407
2408 /**
2409  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2410  * @phba: pointer to lpfc hba data structure.
2411  * @idx: relative index of MRQ vector
2412  * @nvmebuf: pointer to received nvme data structure.
2413  * @isr_timestamp: interrupt service timestamp (ktime, nanoseconds).
2414  * @cqflag: cq processing information regarding workload.
2415  *
2416  * This routine is used to process an unsolicited event received from a SLI
2417  * (Service Level Interface) ring. The actual processing of the data buffer
2418  * associated with the unsolicited event is done by invoking the routine
2419  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2420  * SLI RQ on which the unsolicited event was received.
2421  **/
2422 void
2423 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2424                            uint32_t idx,
2425                            struct rqb_dmabuf *nvmebuf,
2426                            uint64_t isr_timestamp,
2427                            uint8_t cqflag)
2428 {
2429         if (!nvmebuf) {
2430                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2431                                 "3167 NVMET FCP Drop IO\n");
2432                 return;
2433         }
2434         if (phba->nvmet_support == 0) {
2435                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2436                 return;
2437         }
2438         lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2439 }
2440
2441 /**
2442  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2443  * @phba: pointer to a host N_Port data structure.
2444  * @ctxp: Context info for NVME LS Request
2445  * @rspbuf: DMA address of the NVME LS response payload.
2446  * @rspsize: size of the NVME LS response payload.
2447  *
2448  * This routine allocates a lpfc-WQE data structure from the driver
2449  * lpfc-WQE free-list and prepares an XMIT_SEQUENCE64 WQE that will
2450  * transmit the NVME LS response for the exchange described by the
2451  * @ctxp receive context. It first verifies that the link is up and
2452  * that an active ndlp exists for the source N_Port of the original
2453  * LS request. It then fills in the Buffer Descriptor Entry (BDE)
2454  * describing the response payload and sets up the exchange context
2455  * (RPI, XRI and received OX_ID) in the WQE. The reference count on
2456  * the ndlp is incremented by 1 and the reference to the ndlp is
2457  * put into context1 of the WQE data structure for this WQE to hold
2458  * the ndlp reference for the command's callback function to access
2459  * later. The remaining WQE common words (class, command type, CQ
2460  * routing and request tag) are initialized for XMIT_SEQUENCE.
2461  *
2462  * Return code
2463  *   Pointer to the newly allocated/prepared nvme wqe data structure
2464  *   NULL - when nvme wqe data structure allocation/preparation failed
2465  **/
2466 static struct lpfc_iocbq *
2467 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2468                        struct lpfc_nvmet_rcv_ctx *ctxp,
2469                        dma_addr_t rspbuf, uint16_t rspsize)
2470 {
2471         struct lpfc_nodelist *ndlp;
2472         struct lpfc_iocbq *nvmewqe;
2473         union lpfc_wqe128 *wqe;
2474
2475         if (!lpfc_is_link_up(phba)) {
2476                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2477                                 "6104 NVMET prep LS wqe: link err: "
2478                                 "NPORT x%x oxid:x%x ste %d\n",
2479                                 ctxp->sid, ctxp->oxid, ctxp->state);
2480                 return NULL;
2481         }
2482
2483         /* Allocate buffer for  command wqe */
2484         nvmewqe = lpfc_sli_get_iocbq(phba);
2485         if (nvmewqe == NULL) {
2486                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2487                                 "6105 NVMET prep LS wqe: No WQE: "
2488                                 "NPORT x%x oxid x%x ste %d\n",
2489                                 ctxp->sid, ctxp->oxid, ctxp->state);
2490                 return NULL;
2491         }
2492
2493         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2494         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2495             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2496             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2497                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2498                                 "6106 NVMET prep LS wqe: No ndlp: "
2499                                 "NPORT x%x oxid x%x ste %d\n",
2500                                 ctxp->sid, ctxp->oxid, ctxp->state);
2501                 goto nvme_wqe_free_wqeq_exit;
2502         }
2503         ctxp->wqeq = nvmewqe;
2504
2505         /* prevent preparing wqe with NULL ndlp reference */
2506         nvmewqe->context1 = lpfc_nlp_get(ndlp);
2507         if (nvmewqe->context1 == NULL)
2508                 goto nvme_wqe_free_wqeq_exit;
2509         nvmewqe->context2 = ctxp;
2510
2511         wqe = &nvmewqe->wqe;
2512         memset(wqe, 0, sizeof(union lpfc_wqe));
2513
2514         /* Words 0 - 2 */
2515         wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2516         wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2517         wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2518         wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2519
2520         /* Word 3 */
2521
2522         /* Word 4 */
2523
2524         /* Word 5 */
2525         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2526         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2527         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2528         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2529         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2530
2531         /* Word 6 */
2532         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2533                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2534         bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2535
2536         /* Word 7 */
2537         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2538                CMD_XMIT_SEQUENCE64_WQE);
2539         bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2540         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2541         bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2542
2543         /* Word 8 */
2544         wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2545
2546         /* Word 9 */
2547         bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2548         /* Needs to be set by caller */
2549         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2550
2551         /* Word 10 */
2552         bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2553         bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2554         bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2555                LPFC_WQE_LENLOC_WORD12);
2556         bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2557
2558         /* Word 11 */
2559         bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2560                LPFC_WQE_CQ_ID_DEFAULT);
2561         bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2562                OTHER_COMMAND);
2563
2564         /* Word 12 */
2565         wqe->xmit_sequence.xmit_len = rspsize;
2566
2567         nvmewqe->retry = 1;
2568         nvmewqe->vport = phba->pport;
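             /* Timeout after 3 * R_A_TOV plus a fixed driver pad */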
2569         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2570         nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2571
2572         /* Xmit NVMET response to remote NPORT <did> */
2573         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2574                         "6039 Xmit NVMET LS response to remote "
2575                         "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2576                         ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2577                         rspsize);
2578         return nvmewqe;
2579
2580 nvme_wqe_free_wqeq_exit:
2581         nvmewqe->context2 = NULL;
2582         nvmewqe->context3 = NULL;
2583         lpfc_sli_release_iocbq(phba, nvmewqe);
2584         return NULL;
2585 }
2586
2587
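     /**
      * lpfc_nvmet_prep_fcp_wqe - Prepare a WQE for an NVMET FCP target op
      * @phba: Pointer to HBA context object.
      * @ctxp: Pointer to the NVMET receive context for the exchange.
      *
      * Builds the tsend, treceive or trsp WQE and its SGL for the target
      * op requested in ctxp->ctx.fcp_req; for data ops the first two SGEs
      * are SKIP entries and the data SGEs follow.
      *
      * Returns the prepared WQE, or NULL if the link is down, the node is
      * unusable, the segment count exceeds the template limit, or the
      * context is in the wrong state.
      **/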
2588 static struct lpfc_iocbq *
2589 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2590                         struct lpfc_nvmet_rcv_ctx *ctxp)
2591 {
2592         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2593         struct lpfc_nvmet_tgtport *tgtp;
2594         struct sli4_sge *sgl;
2595         struct lpfc_nodelist *ndlp;
2596         struct lpfc_iocbq *nvmewqe;
2597         struct scatterlist *sgel;
2598         union lpfc_wqe128 *wqe;
2599         struct ulp_bde64 *bde;
2600         dma_addr_t physaddr;
2601         int i, cnt;
2602         int do_pbde;
2603         int xc = 1;
2604
2605         if (!lpfc_is_link_up(phba)) {
2606                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2607                                 "6107 NVMET prep FCP wqe: link err:"
2608                                 "NPORT x%x oxid x%x ste %d\n",
2609                                 ctxp->sid, ctxp->oxid, ctxp->state);
2610                 return NULL;
2611         }
2612
2613         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2614         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2615             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2616              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2617                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2618                                 "6108 NVMET prep FCP wqe: no ndlp: "
2619                                 "NPORT x%x oxid x%x ste %d\n",
2620                                 ctxp->sid, ctxp->oxid, ctxp->state);
2621                 return NULL;
2622         }
2623
2624         if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2625                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2626                                 "6109 NVMET prep FCP wqe: seg cnt err: "
2627                                 "NPORT x%x oxid x%x ste %d cnt %d\n",
2628                                 ctxp->sid, ctxp->oxid, ctxp->state,
2629                                 rsp->sg_cnt);
2630                 return NULL;
2631         }
2632
2633         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2634         nvmewqe = ctxp->wqeq;
2635         if (nvmewqe == NULL) {
2636                 /* Allocate buffer for the command WQE */
2637                 nvmewqe = ctxp->ctxbuf->iocbq;
2638                 if (nvmewqe == NULL) {
2639                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2640                                         "6110 NVMET prep FCP wqe: No "
2641                                         "WQE: NPORT x%x oxid x%x ste %d\n",
2642                                         ctxp->sid, ctxp->oxid, ctxp->state);
2643                         return NULL;
2644                 }
2645                 ctxp->wqeq = nvmewqe;
2646                 xc = 0; /* create new XRI */
2647                 nvmewqe->sli4_lxritag = NO_XRI;
2648                 nvmewqe->sli4_xritag = NO_XRI;
2649         }
2650
2651         /* Sanity check */
2652         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2653             (ctxp->entry_cnt == 1)) ||
2654             (ctxp->state == LPFC_NVMET_STE_DATA)) {
2655                 wqe = &nvmewqe->wqe;
2656         } else {
2657                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2658                                 "6111 Wrong state NVMET FCP: %d  cnt %d\n",
2659                                 ctxp->state, ctxp->entry_cnt);
2660                 return NULL;
2661         }
2662
2663         sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2664         switch (rsp->op) {
2665         case NVMET_FCOP_READDATA:
2666         case NVMET_FCOP_READDATA_RSP:
2667                 /* From the tsend template, initialize words 7 - 11 */
2668                 memcpy(&wqe->words[7],
2669                        &lpfc_tsend_cmd_template.words[7],
2670                        sizeof(uint32_t) * 5);
2671
2672                 /* Words 0 - 2 : The first sg segment */
2673                 sgel = &rsp->sg[0];
2674                 physaddr = sg_dma_address(sgel);
2675                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2676                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2677                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2678                 wqe->fcp_tsend.bde.addrHigh =
2679                         cpu_to_le32(putPaddrHigh(physaddr));
2680
2681                 /* Word 3 */
2682                 wqe->fcp_tsend.payload_offset_len = 0;
2683
2684                 /* Word 4 */
2685                 wqe->fcp_tsend.relative_offset = ctxp->offset;
2686
2687                 /* Word 5 */
2688                 wqe->fcp_tsend.reserved = 0;
2689
2690                 /* Word 6 */
2691                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2692                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2693                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2694                        nvmewqe->sli4_xritag);
2695
2696                 /* Word 7 - set ar later */
2697
2698                 /* Word 8 */
2699                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2700
2701                 /* Word 9 */
2702                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2703                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2704
2705                 /* Word 10 - set wqes later, in template xc=1 */
2706                 if (!xc)
2707                         bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2708
2709                 /* Word 11 - set sup, irsp, irsplen later */
2710                 do_pbde = 0;
2711
2712                 /* Word 12 */
2713                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2714
2715                 /* Setup 2 SKIP SGEs */
2716                 sgl->addr_hi = 0;
2717                 sgl->addr_lo = 0;
2718                 sgl->word2 = 0;
2719                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2720                 sgl->word2 = cpu_to_le32(sgl->word2);
2721                 sgl->sge_len = 0;
2722                 sgl++;
2723                 sgl->addr_hi = 0;
2724                 sgl->addr_lo = 0;
2725                 sgl->word2 = 0;
2726                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2727                 sgl->word2 = cpu_to_le32(sgl->word2);
2728                 sgl->sge_len = 0;
2729                 sgl++;
2730                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2731                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
2732
2733                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2734
2735                         if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2736                                 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2737                                         bf_set(wqe_sup,
2738                                                &wqe->fcp_tsend.wqe_com, 1);
2739                         } else {
2740                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2741                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2742                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2743                                        ((rsp->rsplen >> 2) - 1));
2744                                 memcpy(&wqe->words[16], rsp->rspaddr,
2745                                        rsp->rsplen);
2746                         }
2747                 } else {
2748                         atomic_inc(&tgtp->xmt_fcp_read);
2749
2750                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2751                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2752                 }
2753                 break;
2754
2755         case NVMET_FCOP_WRITEDATA:
2756                 /* From the treceive template, initialize words 3 - 11 */
2757                 memcpy(&wqe->words[3],
2758                        &lpfc_treceive_cmd_template.words[3],
2759                        sizeof(uint32_t) * 9);
2760
2761                 /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2762                 wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2763                 wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2764                 wqe->fcp_treceive.bde.addrLow = 0;
2765                 wqe->fcp_treceive.bde.addrHigh = 0;
2766
2767                 /* Word 4 */
2768                 wqe->fcp_treceive.relative_offset = ctxp->offset;
2769
2770                 /* Word 6 */
2771                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2772                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2773                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2774                        nvmewqe->sli4_xritag);
2775
2776                 /* Word 7 */
2777
2778                 /* Word 8 */
2779                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2780
2781                 /* Word 9 */
2782                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2783                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2784
2785                 /* Word 10 - in template xc=1 */
2786                 if (!xc)
2787                         bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2788
2789                 /* Word 11 - set pbde later */
2790                 if (phba->cfg_enable_pbde) {
2791                         do_pbde = 1;
2792                 } else {
2793                         bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2794                         do_pbde = 0;
2795                 }
2796
2797                 /* Word 12 */
2798                 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2799
2800                 /* Setup 2 SKIP SGEs */
2801                 sgl->addr_hi = 0;
2802                 sgl->addr_lo = 0;
2803                 sgl->word2 = 0;
2804                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2805                 sgl->word2 = cpu_to_le32(sgl->word2);
2806                 sgl->sge_len = 0;
2807                 sgl++;
2808                 sgl->addr_hi = 0;
2809                 sgl->addr_lo = 0;
2810                 sgl->word2 = 0;
2811                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2812                 sgl->word2 = cpu_to_le32(sgl->word2);
2813                 sgl->sge_len = 0;
2814                 sgl++;
2815                 atomic_inc(&tgtp->xmt_fcp_write);
2816                 break;
2817
2818         case NVMET_FCOP_RSP:
2819                 /* From the trsp template, initialize words 4 - 11 */
2820                 memcpy(&wqe->words[4],
2821                        &lpfc_trsp_cmd_template.words[4],
2822                        sizeof(uint32_t) * 8);
2823
2824                 /* Words 0 - 2 */
2825                 physaddr = rsp->rspdma;
2826                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2827                 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2828                 wqe->fcp_trsp.bde.addrLow =
2829                         cpu_to_le32(putPaddrLow(physaddr));
2830                 wqe->fcp_trsp.bde.addrHigh =
2831                         cpu_to_le32(putPaddrHigh(physaddr));
2832
2833                 /* Word 3 */
2834                 wqe->fcp_trsp.response_len = rsp->rsplen;
2835
2836                 /* Word 6 */
2837                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2838                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2839                 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2840                        nvmewqe->sli4_xritag);
2841
2842                 /* Word 7 */
2843
2844                 /* Word 8 */
2845                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2846
2847                 /* Word 9 */
2848                 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2849                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2850
2851                 /* Word 10 */
2852                 if (xc)
2853                         bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2854
2855                 /* Word 11 */
2856                 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2857                 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2858                         /* Bad response - embed it */
2859                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2860                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2861                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2862                                ((rsp->rsplen >> 2) - 1));
2863                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2864                 }
2865                 do_pbde = 0;
2866
2867                 /* Word 12 */
2868                 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2869
2870                 /* Use rspbuf, NOT sg list */
2871                 rsp->sg_cnt = 0;
2872                 sgl->word2 = 0;
2873                 atomic_inc(&tgtp->xmt_fcp_rsp);
2874                 break;
2875
2876         default:
2877                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2878                                 "6064 Unknown Rsp Op %d\n",
2879                                 rsp->op);
2880                 return NULL;
2881         }
2882
2883         nvmewqe->retry = 1;
2884         nvmewqe->vport = phba->pport;
2885         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2886         nvmewqe->context1 = ndlp;
2887
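             /* For data ops, fill the data SGEs that follow the two SKIP
              * SGEs set up above and, when PBDE is enabled, mirror the
              * first data SGE into WQE words 13-15 as a physical BDE.
              */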
2888         for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
2889                 physaddr = sg_dma_address(sgel);
2890                 cnt = sg_dma_len(sgel);
2891                 sgl->addr_hi = putPaddrHigh(physaddr);
2892                 sgl->addr_lo = putPaddrLow(physaddr);
2893                 sgl->word2 = 0;
2894                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2895                 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2896                 if ((i + 1) == rsp->sg_cnt)
2897                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2898                 sgl->word2 = cpu_to_le32(sgl->word2);
2899                 sgl->sge_len = cpu_to_le32(cnt);
2900                 if (i == 0) {
2901                         bde = (struct ulp_bde64 *)&wqe->words[13];
2902                         if (do_pbde) {
2903                                 /* Words 13-15  (PBDE) */
2904                                 bde->addrLow = sgl->addr_lo;
2905                                 bde->addrHigh = sgl->addr_hi;
2906                                 bde->tus.f.bdeSize =
2907                                         le32_to_cpu(sgl->sge_len);
2908                                 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2909                                 bde->tus.w = cpu_to_le32(bde->tus.w);
2910                         } else {
2911                                 memset(bde, 0, sizeof(struct ulp_bde64));
2912                         }
2913                 }
2914                 sgl++;
2915                 ctxp->offset += cnt;
2916         }
2917         ctxp->state = LPFC_NVMET_STE_DATA;
2918         ctxp->entry_cnt++;
2919         return nvmewqe;
2920 }
2921
2922 /**
2923  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2924  * @phba: Pointer to HBA context object.
2925  * @cmdwqe: Pointer to driver command WQE object.
2926  * @wcqe: Pointer to driver response CQE object.
2927  *
2928  * The function is called from the SLI ring event handler with no
2929  * lock held. It is the completion handler for an NVME ABTS of an FCP
2930  * command, and frees the memory resources used for the command.
2931  **/
2932 static void
2933 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2934                              struct lpfc_wcqe_complete *wcqe)
2935 {
2936         struct lpfc_nvmet_rcv_ctx *ctxp;
2937         struct lpfc_nvmet_tgtport *tgtp;
2938         uint32_t result;
2939         unsigned long flags;
2940         bool released = false;
2941
2942         ctxp = cmdwqe->context2;
2943         result = wcqe->parameter;
2944
2945         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2946         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2947                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2948
2949         spin_lock_irqsave(&ctxp->ctxlock, flags);
2950         ctxp->state = LPFC_NVMET_STE_DONE;
2951
2952         /* Check if we already received a free context call
2953          * and we have completed processing an abort situation.
2954          */
2955         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2956             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2957                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2958                 list_del_init(&ctxp->list);
2959                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2960                 released = true;
2961         }
2962         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2963         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2964         atomic_inc(&tgtp->xmt_abort_rsp);
2965
2966         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2967                         "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
2968                         "WCQE: %08x %08x %08x %08x\n",
2969                         ctxp->oxid, ctxp->flag, released,
2970                         wcqe->word0, wcqe->total_data_placed,
2971                         result, wcqe->word3);
2972
2973         cmdwqe->context2 = NULL;
2974         cmdwqe->context3 = NULL;
2975         /*
2976          * If the transport has released the ctx, we can reuse it;
2977          * otherwise it will be recycled by the transport release call.
2978          */
2979         if (released)
2980                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2981
2982         /* This is the iocbq for the abort, not the command */
2983         lpfc_sli_release_iocbq(phba, cmdwqe);
2984
2985         /* Since iaab/iaar are NOT set, there is no work left.
2986          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2987          * should have been called already.
2988          */
2989 }
2990
2991 /**
2992  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2993  * @phba: Pointer to HBA context object.
2994  * @cmdwqe: Pointer to driver command WQE object.
2995  * @wcqe: Pointer to driver response CQE object.
2996  *
2997  * The function is called from the SLI ring event handler with no
2998  * lock held. It is the completion handler for an NVME ABTS of an FCP
2999  * command, and frees the memory resources used for the command.
3000  **/
3001 static void
3002 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3003                                struct lpfc_wcqe_complete *wcqe)
3004 {
3005         struct lpfc_nvmet_rcv_ctx *ctxp;
3006         struct lpfc_nvmet_tgtport *tgtp;
3007         unsigned long flags;
3008         uint32_t result;
3009         bool released = false;
3010
3011         ctxp = cmdwqe->context2;
3012         result = wcqe->parameter;
3013
3014         if (!ctxp) {
3015                 /* if context is clear, the related I/O already completed */
3016                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3017                                 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3018                                 wcqe->word0, wcqe->total_data_placed,
3019                                 result, wcqe->word3);
3020                 return;
3021         }
3022
3023         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3024         spin_lock_irqsave(&ctxp->ctxlock, flags);
3025         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
3026                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3027
3028         /* Sanity check */
3029         if (ctxp->state != LPFC_NVMET_STE_ABORT) {
3030                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3031                                 "6112 ABTS Wrong state:%d oxid x%x\n",
3032                                 ctxp->state, ctxp->oxid);
3033         }
3034
3035         /* Check if we already received a free context call
3036          * and we have completed processing an abort situation.
3037          */
3038         ctxp->state = LPFC_NVMET_STE_DONE;
3039         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
3040             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
3041                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3042                 list_del_init(&ctxp->list);
3043                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3044                 released = true;
3045         }
3046         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3047         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3048         atomic_inc(&tgtp->xmt_abort_rsp);
3049
3050         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3051                         "6316 ABTS cmpl oxid x%x flg x%x (%d) "
3052                         "WCQE: %08x %08x %08x %08x\n",
3053                         ctxp->oxid, ctxp->flag, released,
3054                         wcqe->word0, wcqe->total_data_placed,
3055                         result, wcqe->word3);
3056
3057         cmdwqe->context2 = NULL;
3058         cmdwqe->context3 = NULL;
3059         /*
3060          * If the transport has released the ctx, we can reuse it;
3061          * otherwise it will be recycled by the transport release call.
3062          */
3063         if (released)
3064                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3065
3066         /* Since iaab/iaar are NOT set, there is no work left.
3067          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3068          * should have been called already.
3069          */
3070 }
3071
3072 /**
3073  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3074  * @phba: Pointer to HBA context object.
3075  * @cmdwqe: Pointer to driver command WQE object.
3076  * @wcqe: Pointer to driver response CQE object.
3077  *
3078  * The function is called from the SLI ring event handler with no
3079  * lock held. It is the completion handler for an NVME ABTS of an LS
3080  * command, and frees the memory resources used for the command.
3081  **/
3082 static void
3083 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3084                             struct lpfc_wcqe_complete *wcqe)
3085 {
3086         struct lpfc_nvmet_rcv_ctx *ctxp;
3087         struct lpfc_nvmet_tgtport *tgtp;
3088         uint32_t result;
3089
3090         ctxp = cmdwqe->context2;
3091         result = wcqe->parameter;
3092
3093         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3094         atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3095
3096         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3097                         "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3098                         ctxp, wcqe->word0, wcqe->total_data_placed,
3099                         result, wcqe->word3);
3100
3101         if (!ctxp) {
3102                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3103                                 "6415 NVMET LS Abort No ctx: WCQE: "
3104                                 "%08x %08x %08x %08x\n",
3105                                 wcqe->word0, wcqe->total_data_placed,
3106                                 result, wcqe->word3);
3107
3108                 lpfc_sli_release_iocbq(phba, cmdwqe);
3109                 return;
3110         }
3111
3112         if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
3113                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3114                                 "6416 NVMET LS abort cmpl state mismatch: "
3115                                 "oxid x%x: %d %d\n",
3116                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3117         }
3118
3119         cmdwqe->context2 = NULL;
3120         cmdwqe->context3 = NULL;
3121         lpfc_sli_release_iocbq(phba, cmdwqe);
3122         kfree(ctxp);
3123 }
3124
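     /**
      * lpfc_nvmet_unsol_issue_abort - Build a BLS ABTS WQE for an exchange
      * @phba: Pointer to HBA context object.
      * @ctxp: Pointer to the NVMET receive context being aborted.
      * @sid: Source ID of the remote NPORT.
      * @xri: Exchange id of the exchange to abort.
      *
      * Builds an XMIT_SEQUENCE WQE carrying a BLS ABTS for the exchange;
      * the caller issues the WQE. Returns 1 when the WQE was prepared,
      * or 0 when the node was unusable and the ABTS was dropped.
      **/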
3125 static int
3126 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3127                              struct lpfc_nvmet_rcv_ctx *ctxp,
3128                              uint32_t sid, uint16_t xri)
3129 {
3130         struct lpfc_nvmet_tgtport *tgtp;
3131         struct lpfc_iocbq *abts_wqeq;
3132         union lpfc_wqe128 *wqe_abts;
3133         struct lpfc_nodelist *ndlp;
3134
3135         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3136                         "6067 ABTS: sid %x xri x%x/x%x\n",
3137                         sid, xri, ctxp->wqeq->sli4_xritag);
3138
3139         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3140
3141         ndlp = lpfc_findnode_did(phba->pport, sid);
3142         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3143             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3144             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3145                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3146                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3147                                 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3148                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3149
3150                 /* Never report failure for an ABTS request. */
3151                 return 0;
3152         }
3153
3154         abts_wqeq = ctxp->wqeq;
3155         wqe_abts = &abts_wqeq->wqe;
3156
3157         /*
3158          * Since we zero the whole WQE, we need to ensure we set the WQE fields
3159          * that were initialized in lpfc_sli4_nvmet_alloc.
3160          */
3161         memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3162
3163         /* Word 5 */
3164         bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3165         bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3166         bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3167         bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3168         bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3169
3170         /* Word 6 */
3171         bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3172                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3173         bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3174                abts_wqeq->sli4_xritag);
3175
3176         /* Word 7 */
3177         bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3178                CMD_XMIT_SEQUENCE64_WQE);
3179         bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3180         bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3181         bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3182
3183         /* Word 8 */
3184         wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3185
3186         /* Word 9 */
3187         bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3188         /* Needs to be set by caller */
3189         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3190
3191         /* Word 10 */
3192         bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
3193         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3194         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3195                LPFC_WQE_LENLOC_WORD12);
3196         bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3197         bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3198
3199         /* Word 11 */
3200         bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3201                LPFC_WQE_CQ_ID_DEFAULT);
3202         bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3203                OTHER_COMMAND);
3204
3205         abts_wqeq->vport = phba->pport;
3206         abts_wqeq->context1 = ndlp;
3207         abts_wqeq->context2 = ctxp;
3208         abts_wqeq->context3 = NULL;
3209         abts_wqeq->rsvd2 = 0;
3210         /* hba_wqidx should already be set from the command being aborted */
3211         abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3212         abts_wqeq->iocb.ulpLe = 1;
3213
3214         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3215                         "6069 Issue ABTS to xri x%x reqtag x%x\n",
3216                         xri, abts_wqeq->iotag);
3217         return 1;
3218 }
3219
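     /**
      * lpfc_nvmet_sol_fcp_issue_abort - Issue an ABTS for a solicited FCP I/O
      * @phba: Pointer to HBA context object.
      * @ctxp: Pointer to the NVMET receive context being aborted.
      * @sid: Source ID of the remote NPORT.
      * @xri: Exchange id of the exchange to abort.
      *
      * Allocates a separate abort WQE and issues it against the command
      * identified by ctxp->wqeq, on the same hardware queue as that
      * command. Returns 0 when the abort was issued or intentionally
      * dropped, and 1 when issuing the WQE failed.
      **/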
3220 static int
3221 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3222                                struct lpfc_nvmet_rcv_ctx *ctxp,
3223                                uint32_t sid, uint16_t xri)
3224 {
3225         struct lpfc_nvmet_tgtport *tgtp;
3226         struct lpfc_iocbq *abts_wqeq;
3227         struct lpfc_nodelist *ndlp;
3228         unsigned long flags;
3229         u8 opt;
3230         int rc;
3231
3232         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3233         if (!ctxp->wqeq) {
3234                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3235                 ctxp->wqeq->hba_wqidx = 0;
3236         }
3237
3238         ndlp = lpfc_findnode_did(phba->pport, sid);
3239         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3240             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3241             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3242                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3243                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3244                                 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3245                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3246
3247                 /* Never report failure for an ABTS request. */
3248                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3249                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3250                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3251                 return 0;
3252         }
3253
3254         /* Issue ABTS for this WQE based on iotag */
3255         ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3256         spin_lock_irqsave(&ctxp->ctxlock, flags);
3257         if (!ctxp->abort_wqeq) {
3258                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3259                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3260                                 "6161 ABORT failed: No wqeqs: "
3261                                 "xri: x%x\n", ctxp->oxid);
3262                 /* Never report failure for an ABTS request. */
3263                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3264                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3265                 return 0;
3266         }
3267         abts_wqeq = ctxp->abort_wqeq;
3268         ctxp->state = LPFC_NVMET_STE_ABORT;
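             /* If an ABTS was already received from the initiator for this
              * exchange, inhibit sending another ABTS on the wire.
              */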
3269         opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
3270         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3271
3272         /* Log the ABORT request on entry to the submit path. */
3273         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3274                         "6162 ABORT Request to rport DID x%06x "
3275                         "for xri x%x x%x\n",
3276                         ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3277
3278         /* If the hba is getting reset, this flag is set.  It is
3279          * cleared when the reset is complete and rings reestablished.
3280          */
3281         spin_lock_irqsave(&phba->hbalock, flags);
3282         /* driver queued commands are in the process of being flushed */
3283         if (phba->hba_flag & HBA_IOQ_FLUSH) {
3284                 spin_unlock_irqrestore(&phba->hbalock, flags);
3285                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3286                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3287                                 "6163 Driver in reset cleanup - flushing "
3288                                 "NVME Req now. hba_flag x%x oxid x%x\n",
3289                                 phba->hba_flag, ctxp->oxid);
3290                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3291                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3292                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3293                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3294                 return 0;
3295         }
3296
3297         /* Outstanding abort is in progress */
3298         if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3299                 spin_unlock_irqrestore(&phba->hbalock, flags);
3300                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3301                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3302                                 "6164 Outstanding NVME I/O Abort Request "
3303                                 "still pending on oxid x%x\n",
3304                                 ctxp->oxid);
3305                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3306                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3307                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3308                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3309                 return 0;
3310         }
3311
3312         /* Ready - mark outstanding as aborted by driver. */
3313         abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3314
3315         lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
3316
3317         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3318         abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3319         abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3320         abts_wqeq->iocb_cmpl = NULL;
3321         abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3322         abts_wqeq->context2 = ctxp;
3323         abts_wqeq->vport = phba->pport;
3324         if (!ctxp->hdwq)
3325                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3326
3327         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3328         spin_unlock_irqrestore(&phba->hbalock, flags);
3329         if (rc == WQE_SUCCESS) {
3330                 atomic_inc(&tgtp->xmt_abort_sol);
3331                 return 0;
3332         }
3333
3334         atomic_inc(&tgtp->xmt_abort_rsp_error);
3335         spin_lock_irqsave(&ctxp->ctxlock, flags);
3336         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3337         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3338         lpfc_sli_release_iocbq(phba, abts_wqeq);
3339         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3340                         "6166 Failed ABORT issue_wqe with status x%x "
3341                         "for oxid x%x.\n",
3342                         rc, ctxp->oxid);
3343         return 1;
3344 }
3345
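     /**
      * lpfc_nvmet_unsol_fcp_issue_abort - Issue an ABTS for an unsolicited FCP exchange
      * @phba: Pointer to HBA context object.
      * @ctxp: Pointer to the NVMET receive context being aborted.
      * @sid: Source ID of the remote NPORT.
      * @xri: Exchange id of the exchange to abort.
      *
      * Sends a BLS ABTS using the exchange's own WQE. Returns 0 on success
      * and 1 on failure; on failure the context buffer is reposted if the
      * transport already requested its release.
      **/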
3346 static int
3347 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3348                                  struct lpfc_nvmet_rcv_ctx *ctxp,
3349                                  uint32_t sid, uint16_t xri)
3350 {
3351         struct lpfc_nvmet_tgtport *tgtp;
3352         struct lpfc_iocbq *abts_wqeq;
3353         unsigned long flags;
3354         bool released = false;
3355         int rc;
3356
3357         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3358         if (!ctxp->wqeq) {
3359                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3360                 ctxp->wqeq->hba_wqidx = 0;
3361         }
3362
3363         if (ctxp->state == LPFC_NVMET_STE_FREE) {
3364                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3365                                 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3366                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3367                 rc = WQE_BUSY;
3368                 goto aerr;
3369         }
3370         ctxp->state = LPFC_NVMET_STE_ABORT;
3371         ctxp->entry_cnt++;
3372         rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3373         if (rc == 0)
3374                 goto aerr;
3375
3376         spin_lock_irqsave(&phba->hbalock, flags);
3377         abts_wqeq = ctxp->wqeq;
3378         abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3379         abts_wqeq->iocb_cmpl = NULL;
3380         abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3381         if (!ctxp->hdwq)
3382                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3383
3384         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3385         spin_unlock_irqrestore(&phba->hbalock, flags);
3386         if (rc == WQE_SUCCESS)
3387                 return 0;
3389
3390 aerr:
3391         spin_lock_irqsave(&ctxp->ctxlock, flags);
3392         if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
3393                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3394                 list_del_init(&ctxp->list);
3395                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3396                 released = true;
3397         }
3398         ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3399         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3400
3401         atomic_inc(&tgtp->xmt_abort_rsp_error);
3402         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3403                         "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3404                         "(%x)\n",
3405                         ctxp->oxid, rc, released);
3406         if (released)
3407                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3408         return 1;
3409 }
3410
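     /**
      * lpfc_nvmet_unsol_ls_issue_abort - Issue an ABTS for an NVME LS exchange
      * @phba: Pointer to HBA context object.
      * @ctxp: Pointer to the NVMET receive context being aborted.
      * @sid: Source ID of the remote NPORT.
      * @xri: Exchange id of the exchange to abort.
      *
      * Checks the LS state machine, allocates a WQE if the context has
      * none, and sends a BLS ABTS for the LS exchange. Always returns 0;
      * on failure the WQE is released and the context freed here.
      **/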
3411 static int
3412 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3413                                 struct lpfc_nvmet_rcv_ctx *ctxp,
3414                                 uint32_t sid, uint16_t xri)
3415 {
3416         struct lpfc_nvmet_tgtport *tgtp;
3417         struct lpfc_iocbq *abts_wqeq;
3418         unsigned long flags;
3419         int rc;
3420
3421         if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3422             (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3423                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3424                 ctxp->entry_cnt++;
3425         } else {
3426                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3427                                 "6418 NVMET LS abort state mismatch "
3428                                 "IO x%x: %d %d\n",
3429                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3430                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3431         }
3432
3433         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3434         if (!ctxp->wqeq) {
3435                 /* Issue ABTS for this WQE based on iotag */
3436                 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3437                 if (!ctxp->wqeq) {
3438                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3439                                         "6068 Abort failed: No wqeqs: "
3440                                         "xri: x%x\n", xri);
3441                         /* Never report failure for an ABTS request. */
3442                         kfree(ctxp);
3443                         return 0;
3444                 }
3445         }
3446         abts_wqeq = ctxp->wqeq;
3447
3448         if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3449                 rc = WQE_BUSY;
3450                 goto out;
3451         }
3452
3453         spin_lock_irqsave(&phba->hbalock, flags);
3454         abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3455         abts_wqeq->iocb_cmpl = NULL;
3456         abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3457         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3458         spin_unlock_irqrestore(&phba->hbalock, flags);
3459         if (rc == WQE_SUCCESS) {
3460                 atomic_inc(&tgtp->xmt_abort_unsol);
3461                 return 0;
3462         }
3463 out:
3464         atomic_inc(&tgtp->xmt_abort_rsp_error);
3465         abts_wqeq->context2 = NULL;
3466         abts_wqeq->context3 = NULL;
3467         lpfc_sli_release_iocbq(phba, abts_wqeq);
3468         kfree(ctxp);
3469         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3470                         "6056 Failed to Issue ABTS. Status x%x\n", rc);
3471         return 0;
3472 }