/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * a lock held.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
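
/*
 * Usage sketch (editorial illustration, not part of the driver): the
 * copy length is given in bytes and must be a multiple of
 * sizeof(uint64_t), e.g. one full 128-byte WQE image. "src_wqe" and
 * "dst_qe" are hypothetical local buffers.
 *
 *	union lpfc_wqe128 src_wqe, dst_qe;
 *
 *	memset(&src_wqe, 0, sizeof(src_wqe));
 *	lpfc_sli4_pcimem_bcopy(&src_wqe, &dst_qe, sizeof(src_wqe));
 */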

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if the queue memory is invalid, -EBUSY if no entries
 * are available on @q, and -EINVAL if the queue's doorbell format is unknown.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			    q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			    q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
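
/*
 * Usage sketch (editorial illustration, not part of the driver): a
 * caller builds the WQE, takes the hbalock, and handles -EBUSY when
 * the queue is full. "wq" and "wqe" are hypothetical locals.
 *
 *	unsigned long iflags;
 *	int rc;
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY)
 *		(the queue is full; defer or fail the request)
 */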

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
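
/*
 * Usage sketch (editorial illustration, not part of the driver): a
 * mailbox command is posted under the hbalock and the single consumed
 * entry is released from the completion path. "mq" and "mqe" are
 * hypothetical locals.
 *
 *	if (lpfc_sli4_mq_put(mq, &mqe))
 *		(MQ full or invalid; back off and retry later)
 *	(later, in the mailbox completion path:)
 *	lpfc_sli4_mq_release(mq);
 */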

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
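
/*
 * Usage sketch (editorial illustration, not part of the driver): during
 * a long polling pass the doorbell is written with LPFC_QUEUE_NOARM to
 * return credits without re-enabling the interrupt, then once with
 * LPFC_QUEUE_REARM when processing is complete. "eq" and "consumed"
 * are hypothetical locals.
 *
 *	lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_NOARM);
 *	(continue consuming EQEs...)
 *	lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
 */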

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
			    LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	/* Always clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

	return count;
}
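
/*
 * Usage sketch (editorial illustration, not part of the driver): an
 * interrupt or polling context hands the EQ to lpfc_sli4_process_eq(),
 * which consumes EQEs, dispatches each through
 * lpfc_sli4_hba_handle_eqe(), and always re-arms the queue on exit.
 * The cmpxchg() on queue_claimed means a concurrent caller simply
 * re-arms and leaves, so the call is safe from multiple contexts.
 *
 *	int processed = lpfc_sli4_process_eq(phba, eq);
 *
 *	if (!processed)
 *		(EQ was empty or already claimed by another context)
 */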

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well.
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			 uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
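
/*
 * Usage sketch (editorial illustration, not part of the driver): the
 * same get/consume/doorbell pattern used by lpfc_sli4_eqcq_flush()
 * above, shown for a single CQ. "cq" and "count" are hypothetical
 * locals; the sli4_write_cq_db function pointer selects the if_type
 * specific doorbell routine.
 *
 *	struct lpfc_cqe *cqe = lpfc_sli4_cq_get(cq);
 *
 *	while (cqe) {
 *		__lpfc_sli4_consume_cqe(phba, cq, cqe);
 *		count++;
 *		cqe = lpfc_sli4_cq_get(cq);
 *	}
 *	phba->sli4_hba.sli4_write_cq_db(phba, cq, count, LPFC_QUEUE_REARM);
 */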

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq, then ring the Receive Queue Doorbell to
 * signal the HBA to start processing the Receive Queue Entries. On success
 * it returns the index that the hrqe was copied to. It returns -ENOMEM if
 * the queue memory is invalid, -EINVAL if the queue types or put indexes are
 * inconsistent, and -EBUSY if no entries are available.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
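
/*
 * Usage sketch (editorial illustration, not part of the driver): header
 * and data RQEs are posted as a pair, each carrying the DMA address of
 * its receive buffer. "hrq"/"drq" are the queue pair and "hbuf"/"dbuf"
 * are hypothetical DMA buffers; putPaddrLow()/putPaddrHigh() are the
 * driver's address-split helpers.
 *
 *	struct lpfc_rqe hrqe, drqe;
 *	int rc;
 *
 *	hrqe.address_lo = putPaddrLow(hbuf->phys);
 *	hrqe.address_hi = putPaddrHigh(hbuf->phys);
 *	drqe.address_lo = putPaddrLow(dbuf->phys);
 *	drqe.address_hi = putPaddrHigh(dbuf->phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *	if (rc < 0)
 *		(queues full or mismatched; do not hand off the buffers)
 */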

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue pair to reflect
 * consumption of one Receive Queue Entry by the HBA. When the HBA indicates
 * that it has consumed an entry the host calls this function to update the
 * queue's internal pointers. This routine returns the number of entries that
 * were consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap), so
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. It checks whether
 * stop_time (ratov from setting rrq active) has been reached; if it
 * has and the send_rrq flag is set, it calls lpfc_send_rrq. If the
 * send_rrq flag is not set, it just calls the routine to clear the
 * rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
				rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
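
/*
 * Usage sketch (editorial illustration, not part of the driver): after
 * an exchange is aborted, its xri is marked RRQ-active so it is not
 * reused for this target until the RRQ completes;
 * lpfc_test_rrq_active() is the matching guard when selecting a new
 * xri.
 *
 *	if (lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1))
 *		(RRQ could not be tracked; xri reuse is not blocked)
 *
 *	if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *		(skip this xri; it still has an RRQ outstanding)
 */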

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
						struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
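
/*
 * Usage sketch (editorial illustration, not part of the driver): the
 * unlocked wrapper pairs with lpfc_sli_release_iocbq() below; the
 * iotag assigned to an iocb persists across allocations.
 *
 *	struct lpfc_iocbq *piocb = lpfc_sli_get_iocbq(phba);
 *
 *	if (!piocb)
 *		(pool exhausted; fail or defer the request)
 *	(build and issue the command, then on completion:)
 *	lpfc_sli_release_iocbq(phba, piocb);
 */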

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
			(sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
1382
1383 /**
1384  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1385  * @phba: Pointer to HBA context object.
1386  * @iocbq: Pointer to driver iocb object.
1387  *
1388  * This function is called with no lock held to release the iocb to
1389  * iocb pool.
1390  **/
1391 void
1392 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1393 {
1394         unsigned long iflags;
1395
1396         /*
1397          * Clean all volatile data fields, preserve iotag and node struct.
1398          */
1399         spin_lock_irqsave(&phba->hbalock, iflags);
1400         __lpfc_sli_release_iocbq(phba, iocbq);
1401         spin_unlock_irqrestore(&phba->hbalock, iflags);
1402 }
1403
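/*
 * Illustrative sketch, not part of the driver: a command completion
 * handler typically hands the command iocb back to the pool through the
 * unlocked wrapper. The handler name example_els_cmpl is hypothetical.
 *
 *	static void example_els_cmpl(struct lpfc_hba *phba,
 *				     struct lpfc_iocbq *cmdiocb,
 *				     struct lpfc_iocbq *rspiocb)
 *	{
 *		if (rspiocb->iocb.ulpStatus)
 *			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 *					"example cmpl status x%x\n",
 *					rspiocb->iocb.ulpStatus);
 *		lpfc_sli_release_iocbq(phba, cmdiocb);
 *	}
 */
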
1404 /**
1405  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1406  * @phba: Pointer to HBA context object.
1407  * @iocblist: List of IOCBs.
1408  * @ulpstatus: ULP status in IOCB command field.
1409  * @ulpWord4: ULP word-4 in IOCB command field.
1410  *
1411  * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1412  * on the list by invoking the complete callback function associated with the
1413  * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
1414  * fields.
1415  **/
1416 void
1417 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1418                       uint32_t ulpstatus, uint32_t ulpWord4)
1419 {
1420         struct lpfc_iocbq *piocb;
1421
1422         while (!list_empty(iocblist)) {
1423                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1424                 if (!piocb->iocb_cmpl) {
1425                         if (piocb->iocb_flag & LPFC_IO_NVME)
1426                                 lpfc_nvme_cancel_iocb(phba, piocb);
1427                         else
1428                                 lpfc_sli_release_iocbq(phba, piocb);
1429                 } else {
1430                         piocb->iocb.ulpStatus = ulpstatus;
1431                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1432                         (piocb->iocb_cmpl)(phba, piocb, piocb);
1433                 }
1434         }
1435         return;
1436 }
1437
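/*
 * Illustrative sketch, not part of the driver: callers elsewhere in the
 * driver typically splice pending iocbs onto a private list under the
 * appropriate lock and cancel them in one pass; the status pair
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED shown here is the usual choice
 * for aborted exchanges.
 *
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&pring->txq, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */
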
1438 /**
1439  * lpfc_sli_iocb_cmd_type - Get the iocb type
1440  * @iocb_cmnd: iocb command code.
1441  *
1442  * This function is called by ring event handler function to get the iocb type.
1443  * This function translates the iocb command to an iocb command type used to
1444  * decide the final disposition of each completed IOCB.
1445  * The function returns
1446  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1447  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1448  * LPFC_ABORT_IOCB   if it is an abort iocb
1449  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1450  *
1451  * The caller is not required to hold any lock.
1452  **/
1453 static lpfc_iocb_type
1454 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1455 {
1456         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1457
1458         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1459                 return LPFC_UNKNOWN_IOCB;
1460
1461         switch (iocb_cmnd) {
1462         case CMD_XMIT_SEQUENCE_CR:
1463         case CMD_XMIT_SEQUENCE_CX:
1464         case CMD_XMIT_BCAST_CN:
1465         case CMD_XMIT_BCAST_CX:
1466         case CMD_ELS_REQUEST_CR:
1467         case CMD_ELS_REQUEST_CX:
1468         case CMD_CREATE_XRI_CR:
1469         case CMD_CREATE_XRI_CX:
1470         case CMD_GET_RPI_CN:
1471         case CMD_XMIT_ELS_RSP_CX:
1472         case CMD_GET_RPI_CR:
1473         case CMD_FCP_IWRITE_CR:
1474         case CMD_FCP_IWRITE_CX:
1475         case CMD_FCP_IREAD_CR:
1476         case CMD_FCP_IREAD_CX:
1477         case CMD_FCP_ICMND_CR:
1478         case CMD_FCP_ICMND_CX:
1479         case CMD_FCP_TSEND_CX:
1480         case CMD_FCP_TRSP_CX:
1481         case CMD_FCP_TRECEIVE_CX:
1482         case CMD_FCP_AUTO_TRSP_CX:
1483         case CMD_ADAPTER_MSG:
1484         case CMD_ADAPTER_DUMP:
1485         case CMD_XMIT_SEQUENCE64_CR:
1486         case CMD_XMIT_SEQUENCE64_CX:
1487         case CMD_XMIT_BCAST64_CN:
1488         case CMD_XMIT_BCAST64_CX:
1489         case CMD_ELS_REQUEST64_CR:
1490         case CMD_ELS_REQUEST64_CX:
1491         case CMD_FCP_IWRITE64_CR:
1492         case CMD_FCP_IWRITE64_CX:
1493         case CMD_FCP_IREAD64_CR:
1494         case CMD_FCP_IREAD64_CX:
1495         case CMD_FCP_ICMND64_CR:
1496         case CMD_FCP_ICMND64_CX:
1497         case CMD_FCP_TSEND64_CX:
1498         case CMD_FCP_TRSP64_CX:
1499         case CMD_FCP_TRECEIVE64_CX:
1500         case CMD_GEN_REQUEST64_CR:
1501         case CMD_GEN_REQUEST64_CX:
1502         case CMD_XMIT_ELS_RSP64_CX:
1503         case DSSCMD_IWRITE64_CR:
1504         case DSSCMD_IWRITE64_CX:
1505         case DSSCMD_IREAD64_CR:
1506         case DSSCMD_IREAD64_CX:
1507                 type = LPFC_SOL_IOCB;
1508                 break;
1509         case CMD_ABORT_XRI_CN:
1510         case CMD_ABORT_XRI_CX:
1511         case CMD_CLOSE_XRI_CN:
1512         case CMD_CLOSE_XRI_CX:
1513         case CMD_XRI_ABORTED_CX:
1514         case CMD_ABORT_MXRI64_CN:
1515         case CMD_XMIT_BLS_RSP64_CX:
1516                 type = LPFC_ABORT_IOCB;
1517                 break;
1518         case CMD_RCV_SEQUENCE_CX:
1519         case CMD_RCV_ELS_REQ_CX:
1520         case CMD_RCV_SEQUENCE64_CX:
1521         case CMD_RCV_ELS_REQ64_CX:
1522         case CMD_ASYNC_STATUS:
1523         case CMD_IOCB_RCV_SEQ64_CX:
1524         case CMD_IOCB_RCV_ELS64_CX:
1525         case CMD_IOCB_RCV_CONT64_CX:
1526         case CMD_IOCB_RET_XRI64_CX:
1527                 type = LPFC_UNSOL_IOCB;
1528                 break;
1529         case CMD_IOCB_XMIT_MSEQ64_CR:
1530         case CMD_IOCB_XMIT_MSEQ64_CX:
1531         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1532         case CMD_IOCB_RCV_ELS_LIST64_CX:
1533         case CMD_IOCB_CLOSE_EXTENDED_CN:
1534         case CMD_IOCB_ABORT_EXTENDED_CN:
1535         case CMD_IOCB_RET_HBQE64_CN:
1536         case CMD_IOCB_FCP_IBIDIR64_CR:
1537         case CMD_IOCB_FCP_IBIDIR64_CX:
1538         case CMD_IOCB_FCP_ITASKMGT64_CX:
1539         case CMD_IOCB_LOGENTRY_CN:
1540         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1541                 printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
1542                        __func__, iocb_cmnd);
1543                 type = LPFC_UNKNOWN_IOCB;
1544                 break;
1545         default:
1546                 type = LPFC_UNKNOWN_IOCB;
1547                 break;
1548         }
1549
1550         return type;
1551 }
1552
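/*
 * Illustrative sketch, not part of the driver: a ring event handler
 * classifies a completed entry and branches on the result, roughly as
 * below (irsp is a placeholder for the response IOCB).
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:
 *		(match against the txcmplq and invoke iocb_cmpl)
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		(hand the sequence to the unsolicited event handler)
 *		break;
 *	default:
 *		break;
 *	}
 */
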
1553 /**
1554  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1555  * @phba: Pointer to HBA context object.
1556  *
1557  * This function is called from SLI initialization code
1558  * to configure every ring of the HBA's SLI interface. The
1559  * caller is not required to hold any lock. This function issues
1560  * a config_ring mailbox command for each ring.
1561  * This function returns zero if successful else returns a negative
1562  * error code.
1563  **/
1564 static int
1565 lpfc_sli_ring_map(struct lpfc_hba *phba)
1566 {
1567         struct lpfc_sli *psli = &phba->sli;
1568         LPFC_MBOXQ_t *pmb;
1569         MAILBOX_t *pmbox;
1570         int i, rc, ret = 0;
1571
1572         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1573         if (!pmb)
1574                 return -ENOMEM;
1575         pmbox = &pmb->u.mb;
1576         phba->link_state = LPFC_INIT_MBX_CMDS;
1577         for (i = 0; i < psli->num_rings; i++) {
1578                 lpfc_config_ring(phba, i, pmb);
1579                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1580                 if (rc != MBX_SUCCESS) {
1581                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1582                                         "0446 Adapter failed to init (%d), "
1583                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1584                                         "ring %d\n",
1585                                         rc, pmbox->mbxCommand,
1586                                         pmbox->mbxStatus, i);
1587                         phba->link_state = LPFC_HBA_ERROR;
1588                         ret = -ENXIO;
1589                         break;
1590                 }
1591         }
1592         mempool_free(pmb, phba->mbox_mem_pool);
1593         return ret;
1594 }
1595
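/*
 * Illustrative sketch, not part of the driver: the SLI bring-up path
 * treats a ring-map failure as fatal and unwinds; the error label is a
 * placeholder.
 *
 *	rc = lpfc_sli_ring_map(phba);
 *	if (rc)
 *		goto hba_setup_error;
 */
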
1596 /**
1597  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1598  * @phba: Pointer to HBA context object.
1599  * @pring: Pointer to driver SLI ring object.
1600  * @piocb: Pointer to the driver iocb object.
1601  *
1602  * The driver calls this function with the hbalock held for SLI3 ports or
1603  * the ring lock held for SLI4 ports. The function adds the
1604  * new iocb to txcmplq of the given ring. This function always returns
1605  * 0. If this function is called for ELS ring, this function checks if
1606  * there is a vport associated with the ELS command. This function also
1607  * starts els_tmofunc timer if this is an ELS command.
1608  **/
1609 static int
1610 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1611                         struct lpfc_iocbq *piocb)
1612 {
1613         if (phba->sli_rev == LPFC_SLI_REV4)
1614                 lockdep_assert_held(&pring->ring_lock);
1615         else
1616                 lockdep_assert_held(&phba->hbalock);
1617
1618         BUG_ON(!piocb);
1619
1620         list_add_tail(&piocb->list, &pring->txcmplq);
1621         piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1622         pring->txcmplq_cnt++;
1623
1624         if (unlikely(pring->ringno == LPFC_ELS_RING) &&
1625            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1626            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1627                 BUG_ON(!piocb->vport);
1628                 if (!(piocb->vport->load_flag & FC_UNLOADING))
1629                         mod_timer(&piocb->vport->els_tmofunc,
1630                                   jiffies +
1631                                   msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1632         }
1633
1634         return 0;
1635 }
1636
1637 /**
1638  * lpfc_sli_ringtx_get - Get first element of the txq
1639  * @phba: Pointer to HBA context object.
1640  * @pring: Pointer to driver SLI ring object.
1641  *
1642  * This function is called with hbalock held to get next
1643  * iocb in txq of the given ring. If there is any iocb in
1644  * the txq, the function returns first iocb in the list after
1645  * removing the iocb from the list, else it returns NULL.
1646  **/
1647 struct lpfc_iocbq *
1648 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1649 {
1650         struct lpfc_iocbq *cmd_iocb;
1651
1652         lockdep_assert_held(&phba->hbalock);
1653
1654         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1655         return cmd_iocb;
1656 }
1657
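/*
 * Illustrative sketch, not part of the driver: draining a ring's txq
 * under the hbalock follows this pattern (completions is a placeholder
 * for whatever list the caller collects the iocbs on).
 *
 *	struct lpfc_iocbq *iocb;
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	while ((iocb = lpfc_sli_ringtx_get(phba, pring)) != NULL)
 *		list_add_tail(&iocb->list, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 */
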
1658 /**
1659  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1660  * @phba: Pointer to HBA context object.
1661  * @pring: Pointer to driver SLI ring object.
1662  *
1663  * This function is called with hbalock held and the caller must post the
1664  * iocb without releasing the lock. If the caller releases the lock,
1665  * iocb slot returned by the function is not guaranteed to be available.
1666  * The function returns pointer to the next available iocb slot if there
1667  * is available slot in the ring, else it returns NULL.
1668  * If the get index of the ring is ahead of the put index, the function
1669  * will post an error attention event to the worker thread to take the
1670  * HBA to offline state.
1671  **/
1672 static IOCB_t *
1673 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1674 {
1675         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1676         uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1677
1678         lockdep_assert_held(&phba->hbalock);
1679
1680         if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1681            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1682                 pring->sli.sli3.next_cmdidx = 0;
1683
1684         if (unlikely(pring->sli.sli3.local_getidx ==
1685                 pring->sli.sli3.next_cmdidx)) {
1686
1687                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1688
1689                 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1690                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1691                                         "0315 Ring %d issue: portCmdGet %d "
1692                                         "is bigger than cmd ring %d\n",
1693                                         pring->ringno,
1694                                         pring->sli.sli3.local_getidx,
1695                                         max_cmd_idx);
1696
1697                         phba->link_state = LPFC_HBA_ERROR;
1698                         /*
1699                          * All error attention handlers are posted to
1700                          * worker thread
1701                          */
1702                         phba->work_ha |= HA_ERATT;
1703                         phba->work_hs = HS_FFER3;
1704
1705                         lpfc_worker_wake_up(phba);
1706
1707                         return NULL;
1708                 }
1709
1710                 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1711                         return NULL;
1712         }
1713
1714         return lpfc_cmd_iocb(phba, pring);
1715 }
1716
1717 /**
1718  * lpfc_sli_next_iotag - Get an iotag for the iocb
1719  * @phba: Pointer to HBA context object.
1720  * @iocbq: Pointer to driver iocb object.
1721  *
1722  * This function gets an iotag for the iocb. If there is no unused iotag and
1723  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1724  * array and assigns a new iotag.
1725  * The function returns the allocated iotag if successful, else returns zero.
1726  * Zero is not a valid iotag.
1727  * The caller is not required to hold any lock.
1728  **/
1729 uint16_t
1730 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1731 {
1732         struct lpfc_iocbq **new_arr;
1733         struct lpfc_iocbq **old_arr;
1734         size_t new_len;
1735         struct lpfc_sli *psli = &phba->sli;
1736         uint16_t iotag;
1737
1738         spin_lock_irq(&phba->hbalock);
1739         iotag = psli->last_iotag;
1740         if (++iotag < psli->iocbq_lookup_len) {
1741                 psli->last_iotag = iotag;
1742                 psli->iocbq_lookup[iotag] = iocbq;
1743                 spin_unlock_irq(&phba->hbalock);
1744                 iocbq->iotag = iotag;
1745                 return iotag;
1746         } else if (psli->iocbq_lookup_len < (0xffff
1747                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1748                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1749                 spin_unlock_irq(&phba->hbalock);
1750                 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1751                                   GFP_KERNEL);
1752                 if (new_arr) {
1753                         spin_lock_irq(&phba->hbalock);
1754                         old_arr = psli->iocbq_lookup;
1755                         if (new_len <= psli->iocbq_lookup_len) {
1756                                 /* highly improbable case */
1757                                 kfree(new_arr);
1758                                 iotag = psli->last_iotag;
1759                                 if (++iotag < psli->iocbq_lookup_len) {
1760                                         psli->last_iotag = iotag;
1761                                         psli->iocbq_lookup[iotag] = iocbq;
1762                                         spin_unlock_irq(&phba->hbalock);
1763                                         iocbq->iotag = iotag;
1764                                         return iotag;
1765                                 }
1766                                 spin_unlock_irq(&phba->hbalock);
1767                                 return 0;
1768                         }
1769                         if (psli->iocbq_lookup)
1770                                 memcpy(new_arr, old_arr,
1771                                        ((psli->last_iotag + 1) *
1772                                         sizeof(struct lpfc_iocbq *)));
1773                         psli->iocbq_lookup = new_arr;
1774                         psli->iocbq_lookup_len = new_len;
1775                         psli->last_iotag = iotag;
1776                         psli->iocbq_lookup[iotag] = iocbq;
1777                         spin_unlock_irq(&phba->hbalock);
1778                         iocbq->iotag = iotag;
1779                         kfree(old_arr);
1780                         return iotag;
1781                 }
1782         } else
1783                 spin_unlock_irq(&phba->hbalock);
1784
1785         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1786                         "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1787                         psli->last_iotag);
1788
1789         return 0;
1790 }
1791
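/*
 * Illustrative sketch, not part of the driver: an allocator reserves an
 * iotag right after pulling an iocbq from the pool and must treat zero
 * as failure, e.g. (assuming no lock is held at this point):
 *
 *	if (lpfc_sli_next_iotag(phba, iocbq) == 0) {
 *		lpfc_sli_release_iocbq(phba, iocbq);
 *		return -ENOMEM;
 *	}
 */
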
1792 /**
1793  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1794  * @phba: Pointer to HBA context object.
1795  * @pring: Pointer to driver SLI ring object.
1796  * @iocb: Pointer to iocb slot in the ring.
1797  * @nextiocb: Pointer to driver iocb object which needs to be
1798  *            posted to firmware.
1799  *
1800  * This function is called with hbalock held to post a new iocb to
1801  * the firmware. This function copies the new iocb to ring iocb slot and
1802  * updates the ring pointers. It adds the new iocb to txcmplq if there is
1803  * a completion call back for this iocb else the function will free the
1804  * iocb object.
1805  **/
1806 static void
1807 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1808                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1809 {
1810         lockdep_assert_held(&phba->hbalock);
1811         /*
1812          * Set up an iotag
1813          */
1814         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1815
1816
1817         if (pring->ringno == LPFC_ELS_RING) {
1818                 lpfc_debugfs_slow_ring_trc(phba,
1819                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1820                         *(((uint32_t *) &nextiocb->iocb) + 4),
1821                         *(((uint32_t *) &nextiocb->iocb) + 6),
1822                         *(((uint32_t *) &nextiocb->iocb) + 7));
1823         }
1824
1825         /*
1826          * Issue iocb command to adapter
1827          */
1828         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1829         wmb();
1830         pring->stats.iocb_cmd++;
1831
1832         /*
1833          * If there is no completion routine to call, we can release the
1834          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1835          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1836          */
1837         if (nextiocb->iocb_cmpl)
1838                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1839         else
1840                 __lpfc_sli_release_iocbq(phba, nextiocb);
1841
1842         /*
1843          * Let the HBA know what IOCB slot will be the next one the
1844          * driver will put a command into.
1845          */
1846         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1847         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1848 }
1849
1850 /**
1851  * lpfc_sli_update_full_ring - Update the chip attention register
1852  * @phba: Pointer to HBA context object.
1853  * @pring: Pointer to driver SLI ring object.
1854  *
1855  * The caller is not required to hold any lock for calling this function.
1856  * This function updates the chip attention bits for the ring to inform firmware
1857  * that there is pending work to be done for this ring and requests an
1858  * interrupt when there is space available in the ring. This function is
1859  * called when the driver is unable to post more iocbs to the ring due
1860  * to unavailability of space in the ring.
1861  **/
1862 static void
1863 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1864 {
1865         int ringno = pring->ringno;
1866
1867         pring->flag |= LPFC_CALL_RING_AVAILABLE;
1868
1869         wmb();
1870
1871         /*
1872          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1873          * The HBA will tell us when an IOCB entry is available.
1874          */
1875         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1876         readl(phba->CAregaddr); /* flush */
1877
1878         pring->stats.iocb_cmd_full++;
1879 }
1880
1881 /**
1882  * lpfc_sli_update_ring - Update chip attention register
1883  * @phba: Pointer to HBA context object.
1884  * @pring: Pointer to driver SLI ring object.
1885  *
1886  * This function updates the chip attention register bit for the
1887  * given ring to inform HBA that there is more work to be done
1888  * in this ring. The caller is not required to hold any lock.
1889  **/
1890 static void
1891 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1892 {
1893         int ringno = pring->ringno;
1894
1895         /*
1896          * Tell the HBA that there is work to do in this ring.
1897          */
1898         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1899                 wmb();
1900                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1901                 readl(phba->CAregaddr); /* flush */
1902         }
1903 }
1904
1905 /**
1906  * lpfc_sli_resume_iocb - Process iocbs in the txq
1907  * @phba: Pointer to HBA context object.
1908  * @pring: Pointer to driver SLI ring object.
1909  *
1910  * This function is called with hbalock held to post pending iocbs
1911  * in the txq to the firmware. This function is called when driver
1912  * detects space available in the ring.
1913  **/
1914 static void
1915 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1916 {
1917         IOCB_t *iocb;
1918         struct lpfc_iocbq *nextiocb;
1919
1920         lockdep_assert_held(&phba->hbalock);
1921
1922         /*
1923          * Check to see if:
1924          *  (a) there is anything on the txq to send
1925          *  (b) link is up
1926          *  (c) link attention events can be processed (fcp ring only)
1927          *  (d) IOCB processing is not blocked by the outstanding mbox command.
1928          */
1929
1930         if (lpfc_is_link_up(phba) &&
1931             (!list_empty(&pring->txq)) &&
1932             (pring->ringno != LPFC_FCP_RING ||
1933              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1934
1935                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1936                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1937                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1938
1939                 if (iocb)
1940                         lpfc_sli_update_ring(phba, pring);
1941                 else
1942                         lpfc_sli_update_full_ring(phba, pring);
1943         }
1944
1945         return;
1946 }
1947
1948 /**
1949  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1950  * @phba: Pointer to HBA context object.
1951  * @hbqno: HBQ number.
1952  *
1953  * This function is called with hbalock held to get the next
1954  * available slot for the given HBQ. If there is free slot
1955  * available for the HBQ it will return pointer to the next available
1956  * HBQ entry else it will return NULL.
1957  **/
1958 static struct lpfc_hbq_entry *
1959 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1960 {
1961         struct hbq_s *hbqp = &phba->hbqs[hbqno];
1962
1963         lockdep_assert_held(&phba->hbalock);
1964
1965         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1966             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1967                 hbqp->next_hbqPutIdx = 0;
1968
1969         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1970                 uint32_t raw_index = phba->hbq_get[hbqno];
1971                 uint32_t getidx = le32_to_cpu(raw_index);
1972
1973                 hbqp->local_hbqGetIdx = getidx;
1974
1975                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1976                         lpfc_printf_log(phba, KERN_ERR,
1977                                         LOG_SLI | LOG_VPORT,
1978                                         "1802 HBQ %d: local_hbqGetIdx "
1979                                         "%u is > than hbqp->entry_count %u\n",
1980                                         hbqno, hbqp->local_hbqGetIdx,
1981                                         hbqp->entry_count);
1982
1983                         phba->link_state = LPFC_HBA_ERROR;
1984                         return NULL;
1985                 }
1986
1987                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1988                         return NULL;
1989         }
1990
1991         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1992                         hbqp->hbqPutIdx;
1993 }
1994
1995 /**
1996  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1997  * @phba: Pointer to HBA context object.
1998  *
1999  * This function is called with no lock held to free all the
2000  * hbq buffers while uninitializing the SLI interface. It also
2001  * frees the HBQ buffers returned by the firmware but not yet
2002  * processed by the upper layers.
2003  **/
2004 void
2005 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2006 {
2007         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2008         struct hbq_dmabuf *hbq_buf;
2009         unsigned long flags;
2010         int i, hbq_count;
2011
2012         hbq_count = lpfc_sli_hbq_count();
2013         /* Return all memory used by all HBQs */
2014         spin_lock_irqsave(&phba->hbalock, flags);
2015         for (i = 0; i < hbq_count; ++i) {
2016                 list_for_each_entry_safe(dmabuf, next_dmabuf,
2017                                 &phba->hbqs[i].hbq_buffer_list, list) {
2018                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2019                         list_del(&hbq_buf->dbuf.list);
2020                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2021                 }
2022                 phba->hbqs[i].buffer_count = 0;
2023         }
2024
2025         /* Mark the HBQs not in use */
2026         phba->hbq_in_use = 0;
2027         spin_unlock_irqrestore(&phba->hbalock, flags);
2028 }
2029
2030 /**
2031  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2032  * @phba: Pointer to HBA context object.
2033  * @hbqno: HBQ number.
2034  * @hbq_buf: Pointer to HBQ buffer.
2035  *
2036  * This function is called with the hbalock held to post a
2037  * hbq buffer to the firmware. If the function finds an empty
2038  * slot in the HBQ, it will post the buffer. The function returns
2039  * zero if it successfully posts the buffer, else it returns an
2040  * error code.
2041  **/
2042 static int
2043 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2044                          struct hbq_dmabuf *hbq_buf)
2045 {
2046         lockdep_assert_held(&phba->hbalock);
2047         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2048 }
2049
2050 /**
2051  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2052  * @phba: Pointer to HBA context object.
2053  * @hbqno: HBQ number.
2054  * @hbq_buf: Pointer to HBQ buffer.
2055  *
2056  * This function is called with the hbalock held to post a hbq buffer to the
2057  * firmware. If the function finds an empty slot in the HBQ, it will post the
2058  * buffer and place it on the hbq_buffer_list. The function returns zero if
2059  * it successfully posts the buffer, else it returns an error.
2060  **/
2061 static int
2062 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2063                             struct hbq_dmabuf *hbq_buf)
2064 {
2065         struct lpfc_hbq_entry *hbqe;
2066         dma_addr_t physaddr = hbq_buf->dbuf.phys;
2067
2068         lockdep_assert_held(&phba->hbalock);
2069         /* Get next HBQ entry slot to use */
2070         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2071         if (hbqe) {
2072                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2073
2074                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2075                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2076                 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2077                 hbqe->bde.tus.f.bdeFlags = 0;
2078                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2079                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2080                                 /* Sync SLIM */
2081                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2082                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2083                                 /* flush */
2084                 readl(phba->hbq_put + hbqno);
2085                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2086                 return 0;
2087         }
2088         return -ENOMEM;
2089 }
2090
2091 /**
2092  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2093  * @phba: Pointer to HBA context object.
2094  * @hbqno: HBQ number.
2095  * @hbq_buf: Pointer to HBQ buffer.
2096  *
2097  * This function is called with the hbalock held to post an RQE to the SLI4
2098  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2099  * the hbq_buffer_list and return zero, otherwise it will return an error.
2100  **/
2101 static int
2102 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2103                             struct hbq_dmabuf *hbq_buf)
2104 {
2105         int rc;
2106         struct lpfc_rqe hrqe;
2107         struct lpfc_rqe drqe;
2108         struct lpfc_queue *hrq;
2109         struct lpfc_queue *drq;
2110
2111         if (hbqno != LPFC_ELS_HBQ)
2112                 return 1;
2113         hrq = phba->sli4_hba.hdr_rq;
2114         drq = phba->sli4_hba.dat_rq;
2115
2116         lockdep_assert_held(&phba->hbalock);
2117         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2118         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2119         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2120         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2121         rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2122         if (rc < 0)
2123                 return rc;
2124         hbq_buf->tag = (rc | (hbqno << 16));
2125         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2126         return 0;
2127 }
2128
2129 /* HBQ for ELS and CT traffic. */
2130 static struct lpfc_hbq_init lpfc_els_hbq = {
2131         .rn = 1,
2132         .entry_count = 256,
2133         .mask_count = 0,
2134         .profile = 0,
2135         .ring_mask = (1 << LPFC_ELS_RING),
2136         .buffer_count = 0,
2137         .init_count = 40,
2138         .add_count = 40,
2139 };
2140
2141 /* Array of HBQs */
2142 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2143         &lpfc_els_hbq,
2144 };
2145
2146 /**
2147  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2148  * @phba: Pointer to HBA context object.
2149  * @hbqno: HBQ number.
2150  * @count: Number of HBQ buffers to be posted.
2151  *
2152  * This function is called with no lock held to post more hbq buffers to the
2153  * given HBQ. The function returns the number of HBQ buffers successfully
2154  * posted.
2155  **/
2156 static int
2157 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2158 {
2159         uint32_t i, posted = 0;
2160         unsigned long flags;
2161         struct hbq_dmabuf *hbq_buffer;
2162         LIST_HEAD(hbq_buf_list);
2163         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2164                 return 0;
2165
2166         if ((phba->hbqs[hbqno].buffer_count + count) >
2167             lpfc_hbq_defs[hbqno]->entry_count)
2168                 count = lpfc_hbq_defs[hbqno]->entry_count -
2169                                         phba->hbqs[hbqno].buffer_count;
2170         if (!count)
2171                 return 0;
2172         /* Allocate HBQ entries */
2173         for (i = 0; i < count; i++) {
2174                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2175                 if (!hbq_buffer)
2176                         break;
2177                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2178         }
2179         /* Check whether HBQ is still in use */
2180         spin_lock_irqsave(&phba->hbalock, flags);
2181         if (!phba->hbq_in_use)
2182                 goto err;
2183         while (!list_empty(&hbq_buf_list)) {
2184                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2185                                  dbuf.list);
2186                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2187                                       (hbqno << 16));
2188                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2189                         phba->hbqs[hbqno].buffer_count++;
2190                         posted++;
2191                 } else
2192                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2193         }
2194         spin_unlock_irqrestore(&phba->hbalock, flags);
2195         return posted;
2196 err:
2197         spin_unlock_irqrestore(&phba->hbalock, flags);
2198         while (!list_empty(&hbq_buf_list)) {
2199                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2200                                  dbuf.list);
2201                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2202         }
2203         return 0;
2204 }
2205
2206 /**
2207  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2208  * @phba: Pointer to HBA context object.
2209  * @qno: HBQ number.
2210  *
2211  * This function posts more buffers to the HBQ. This function
2212  * is called with no lock held. The function returns the number of HBQ entries
2213  * successfully posted.
2214  **/
2215 int
2216 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2217 {
2218         if (phba->sli_rev == LPFC_SLI_REV4)
2219                 return 0;
2220         else
2221                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2222                                          lpfc_hbq_defs[qno]->add_count);
2223 }
2224
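/*
 * Illustrative sketch, not part of the driver: the receive path
 * replenishes the ELS HBQ when its buffer count runs low, along these
 * lines (low_watermark is a hypothetical threshold).
 *
 *	if (phba->hbqs[LPFC_ELS_HBQ].buffer_count < low_watermark)
 *		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 */
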
2225 /**
2226  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2227  * @phba: Pointer to HBA context object.
2228  * @qno:  HBQ queue number.
2229  *
2230  * This function is called from SLI initialization code path with
2231  * no lock held to post initial HBQ buffers to firmware. The
2232  * function returns the number of HBQ entries successfully posted.
2233  **/
2234 static int
2235 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2236 {
2237         if (phba->sli_rev == LPFC_SLI_REV4)
2238                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2239                                         lpfc_hbq_defs[qno]->entry_count);
2240         else
2241                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2242                                          lpfc_hbq_defs[qno]->init_count);
2243 }
2244
2245 /**
2246  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off an hbq list
2247  * @rb_list: Pointer to the driver hbq buffer list to remove the
2248  *           first buffer from.
2249  *
2250  * This function removes the first hbq buffer on an hbq list and returns a
2251  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2252  **/
2253 static struct hbq_dmabuf *
2254 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2255 {
2256         struct lpfc_dmabuf *d_buf;
2257
2258         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2259         if (!d_buf)
2260                 return NULL;
2261         return container_of(d_buf, struct hbq_dmabuf, dbuf);
2262 }
2263
2264 /**
2265  * lpfc_sli_rqbuf_get - Remove the first dma buffer off an RQ list
2266  * @phba: Pointer to HBA context object.
2267  * @hrq: Pointer to the receive queue to take the buffer from.
2268  *
2269  * This function removes the first RQ buffer on an RQ buffer list and returns a
2270  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2271  **/
2272 static struct rqb_dmabuf *
2273 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2274 {
2275         struct lpfc_dmabuf *h_buf;
2276         struct lpfc_rqb *rqbp;
2277
2278         rqbp = hrq->rqbp;
2279         list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2280                          struct lpfc_dmabuf, list);
2281         if (!h_buf)
2282                 return NULL;
2283         rqbp->buffer_count--;
2284         return container_of(h_buf, struct rqb_dmabuf, hbuf);
2285 }
2286
2287 /**
2288  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2289  * @phba: Pointer to HBA context object.
2290  * @tag: Tag of the hbq buffer.
2291  *
2292  * This function searches for the hbq buffer associated with the given tag in
2293  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2294  * otherwise it returns NULL.
2295  **/
2296 static struct hbq_dmabuf *
2297 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2298 {
2299         struct lpfc_dmabuf *d_buf;
2300         struct hbq_dmabuf *hbq_buf;
2301         uint32_t hbqno;
2302
2303         hbqno = tag >> 16;
2304         if (hbqno >= LPFC_MAX_HBQS)
2305                 return NULL;
2306
2307         spin_lock_irq(&phba->hbalock);
2308         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2309                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2310                 if (hbq_buf->tag == tag) {
2311                         spin_unlock_irq(&phba->hbalock);
2312                         return hbq_buf;
2313                 }
2314         }
2315         spin_unlock_irq(&phba->hbalock);
2316         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2317                         "1803 Bad hbq tag. Data: x%x x%x\n",
2318                         tag, phba->hbqs[tag >> 16].buffer_count);
2319         return NULL;
2320 }
2321
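/*
 * Illustrative sketch, not part of the driver: an unsolicited-event
 * handler recovers the posted buffer from the tag the firmware carries
 * back in the IOCB (tag is a placeholder).
 *
 *	struct hbq_dmabuf *hbq_buf;
 *
 *	hbq_buf = lpfc_sli_hbqbuf_find(phba, tag);
 *	if (hbq_buf)
 *		(process the frame, then recycle via lpfc_sli_free_hbq())
 */
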
2322 /**
2323  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2324  * @phba: Pointer to HBA context object.
2325  * @hbq_buffer: Pointer to HBQ buffer.
2326  *
2327  * This function is called with the hbalock held. It gives back
2328  * the hbq buffer to firmware. If the HBQ does not have space to
2329  * post the buffer, it will free the buffer.
2330  **/
2331 void
2332 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2333 {
2334         uint32_t hbqno;
2335
2336         if (hbq_buffer) {
2337                 hbqno = hbq_buffer->tag >> 16;
2338                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2339                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2340         }
2341 }
2342
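/*
 * Illustrative sketch, not part of the driver: once the upper layer is
 * done with a received buffer it is recycled to the firmware under the
 * hbalock (hbq_entry is a placeholder).
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	lpfc_sli_free_hbq(phba, hbq_entry);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */
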
2343 /**
2344  * lpfc_sli_chk_mbx_command - Check if a mailbox command is legitimate
2345  * @mbxCommand: mailbox command code.
2346  *
2347  * This function is called by the mailbox event handler function to verify
2348  * that the completed mailbox command is a legitimate mailbox command. If the
2349  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2350  * and the mailbox event handler will take the HBA offline.
2351  **/
2352 static int
2353 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2354 {
2355         uint8_t ret;
2356
2357         switch (mbxCommand) {
2358         case MBX_LOAD_SM:
2359         case MBX_READ_NV:
2360         case MBX_WRITE_NV:
2361         case MBX_WRITE_VPARMS:
2362         case MBX_RUN_BIU_DIAG:
2363         case MBX_INIT_LINK:
2364         case MBX_DOWN_LINK:
2365         case MBX_CONFIG_LINK:
2366         case MBX_CONFIG_RING:
2367         case MBX_RESET_RING:
2368         case MBX_READ_CONFIG:
2369         case MBX_READ_RCONFIG:
2370         case MBX_READ_SPARM:
2371         case MBX_READ_STATUS:
2372         case MBX_READ_RPI:
2373         case MBX_READ_XRI:
2374         case MBX_READ_REV:
2375         case MBX_READ_LNK_STAT:
2376         case MBX_REG_LOGIN:
2377         case MBX_UNREG_LOGIN:
2378         case MBX_CLEAR_LA:
2379         case MBX_DUMP_MEMORY:
2380         case MBX_DUMP_CONTEXT:
2381         case MBX_RUN_DIAGS:
2382         case MBX_RESTART:
2383         case MBX_UPDATE_CFG:
2384         case MBX_DOWN_LOAD:
2385         case MBX_DEL_LD_ENTRY:
2386         case MBX_RUN_PROGRAM:
2387         case MBX_SET_MASK:
2388         case MBX_SET_VARIABLE:
2389         case MBX_UNREG_D_ID:
2390         case MBX_KILL_BOARD:
2391         case MBX_CONFIG_FARP:
2392         case MBX_BEACON:
2393         case MBX_LOAD_AREA:
2394         case MBX_RUN_BIU_DIAG64:
2395         case MBX_CONFIG_PORT:
2396         case MBX_READ_SPARM64:
2397         case MBX_READ_RPI64:
2398         case MBX_REG_LOGIN64:
2399         case MBX_READ_TOPOLOGY:
2400         case MBX_WRITE_WWN:
2401         case MBX_SET_DEBUG:
2402         case MBX_LOAD_EXP_ROM:
2403         case MBX_ASYNCEVT_ENABLE:
2404         case MBX_REG_VPI:
2405         case MBX_UNREG_VPI:
2406         case MBX_HEARTBEAT:
2407         case MBX_PORT_CAPABILITIES:
2408         case MBX_PORT_IOV_CONTROL:
2409         case MBX_SLI4_CONFIG:
2410         case MBX_SLI4_REQ_FTRS:
2411         case MBX_REG_FCFI:
2412         case MBX_UNREG_FCFI:
2413         case MBX_REG_VFI:
2414         case MBX_UNREG_VFI:
2415         case MBX_INIT_VPI:
2416         case MBX_INIT_VFI:
2417         case MBX_RESUME_RPI:
2418         case MBX_READ_EVENT_LOG_STATUS:
2419         case MBX_READ_EVENT_LOG:
2420         case MBX_SECURITY_MGMT:
2421         case MBX_AUTH_PORT:
2422         case MBX_ACCESS_VDATA:
2423                 ret = mbxCommand;
2424                 break;
2425         default:
2426                 ret = MBX_SHUTDOWN;
2427                 break;
2428         }
2429         return ret;
2430 }
2431
2432 /**
2433  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2434  * @phba: Pointer to HBA context object.
2435  * @pmboxq: Pointer to mailbox command.
2436  *
2437  * This is completion handler function for mailbox commands issued from
2438  * lpfc_sli_issue_mbox_wait function. This function is called by the
2439  * mailbox event handler function with no lock held. This function
2440  * will wake up thread waiting on the wait queue pointed by context1
2441  * of the mailbox.
2442  **/
2443 void
2444 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2445 {
2446         unsigned long drvr_flag;
2447         struct completion *pmbox_done;
2448
2449         /*
2450          * If pmbox_done is empty, the driver thread gave up waiting and
2451          * continued running.
2452          */
2453         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2454         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2455         pmbox_done = (struct completion *)pmboxq->context3;
2456         if (pmbox_done)
2457                 complete(pmbox_done);
2458         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2459         return;
2460 }
2461
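/*
 * Illustrative sketch, not part of the driver: a synchronous issuer
 * pairs this handler with a completion stored in context3 and then
 * waits, roughly as below (timeout is a placeholder, in seconds).
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context3 = &mbox_done;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_for_completion_timeout(&mbox_done,
 *					    msecs_to_jiffies(timeout * 1000));
 */
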
2462 static void
2463 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2464 {
2465         unsigned long iflags;
2466
2467         if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2468                 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2469                 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2470                 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2471                 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2472                 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2473         }
2474         ndlp->nlp_flag &= ~NLP_UNREG_INP;
2475 }
2476
2477 /**
2478  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2479  * @phba: Pointer to HBA context object.
2480  * @pmb: Pointer to mailbox object.
2481  *
2482  * This function is the default mailbox completion handler. It
2483  * frees the memory resources associated with the completed mailbox
2484  * command. If the completed command is a REG_LOGIN mailbox command,
2485  * this function will issue an UNREG_LOGIN to reclaim the RPI.
2486  **/
2487 void
2488 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2489 {
2490         struct lpfc_vport  *vport = pmb->vport;
2491         struct lpfc_dmabuf *mp;
2492         struct lpfc_nodelist *ndlp;
2493         struct Scsi_Host *shost;
2494         uint16_t rpi, vpi;
2495         int rc;
2496
2497         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2498
2499         if (mp) {
2500                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2501                 kfree(mp);
2502         }
2503
2504         /*
2505          * If a REG_LOGIN succeeded after the node was destroyed or the
2506          * node is in re-discovery, the driver needs to clean up the RPI.
2507          */
2508         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2509             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2510             !pmb->u.mb.mbxStatus) {
2511                 rpi = pmb->u.mb.un.varWords[0];
2512                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2513                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2514                 pmb->vport = vport;
2515                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2516                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2517                 if (rc != MBX_NOT_FINISHED)
2518                         return;
2519         }
2520
2521         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2522                 !(phba->pport->load_flag & FC_UNLOADING) &&
2523                 !pmb->u.mb.mbxStatus) {
2524                 shost = lpfc_shost_from_vport(vport);
2525                 spin_lock_irq(shost->host_lock);
2526                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2527                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2528                 spin_unlock_irq(shost->host_lock);
2529         }
2530
2531         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2532                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2533                 lpfc_nlp_put(ndlp);
2534                 pmb->ctx_buf = NULL;
2535                 pmb->ctx_ndlp = NULL;
2536         }
2537
2538         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2539                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2540
2541                 /* Check to see if there are any deferred events to process */
2542                 if (ndlp) {
2543                         lpfc_printf_vlog(
2544                                 vport,
2545                                 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2546                                 "1438 UNREG cmpl deferred mbox x%x "
2547                                 "on NPort x%x Data: x%x x%x %px\n",
2548                                 ndlp->nlp_rpi, ndlp->nlp_DID,
2549                                 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2550
2551                         if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2552                             (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2553                                 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2554                                 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2555                                 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2556                         } else {
2557                                 __lpfc_sli_rpi_release(vport, ndlp);
2558                         }
2559                         if (vport->load_flag & FC_UNLOADING)
2560                                 lpfc_nlp_put(ndlp);
2561                         pmb->ctx_ndlp = NULL;
2562                 }
2563         }
2564
2565         /* Check security permission status on INIT_LINK mailbox command */
2566         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2567             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2568                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2569                                 "2860 SLI authentication is required "
2570                                 "for INIT_LINK but has not done yet\n");
2571
2572         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2573                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2574         else
2575                 mempool_free(pmb, phba->mbox_mem_pool);
2576 }
2577 /**
2578  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2579  * @phba: Pointer to HBA context object.
2580  * @pmb: Pointer to mailbox object.
2581  *
2582  * This function is the unreg rpi mailbox completion handler. It
2583  * frees the memory resources associated with the completed mailbox
2584  * command. An additional reference is put on the ndlp to prevent
2585  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2586  * the unreg mailbox command completes; this routine puts the
2587  * reference back.
2588  *
2589  **/
2590 void
2591 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2592 {
2593         struct lpfc_vport  *vport = pmb->vport;
2594         struct lpfc_nodelist *ndlp;
2595
2596         ndlp = pmb->ctx_ndlp;
2597         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2598                 if (phba->sli_rev == LPFC_SLI_REV4 &&
2599                     (bf_get(lpfc_sli_intf_if_type,
2600                      &phba->sli4_hba.sli_intf) >=
2601                      LPFC_SLI_INTF_IF_TYPE_2)) {
2602                         if (ndlp) {
2603                                 lpfc_printf_vlog(
2604                                         vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2605                                          "0010 UNREG_LOGIN vpi:%x "
2606                                          "rpi:%x DID:%x defer x%x flg x%x "
2607                                          "map:%x %px\n",
2608                                          vport->vpi, ndlp->nlp_rpi,
2609                                          ndlp->nlp_DID, ndlp->nlp_defer_did,
2610                                          ndlp->nlp_flag,
2611                                          ndlp->nlp_usg_map, ndlp);
2612                                 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2613                                 lpfc_nlp_put(ndlp);
2614
2615                                 /* Check to see if there are any deferred
2616                                  * events to process
2617                                  */
2618                                 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2619                                     (ndlp->nlp_defer_did !=
2620                                     NLP_EVT_NOTHING_PENDING)) {
2621                                         lpfc_printf_vlog(
2622                                                 vport, KERN_INFO, LOG_DISCOVERY,
2623                                                 "4111 UNREG cmpl deferred "
2624                                                 "clr x%x on "
2625                                                 "NPort x%x Data: x%x x%px\n",
2626                                                 ndlp->nlp_rpi, ndlp->nlp_DID,
2627                                                 ndlp->nlp_defer_did, ndlp);
2628                                         ndlp->nlp_flag &= ~NLP_UNREG_INP;
2629                                         ndlp->nlp_defer_did =
2630                                                 NLP_EVT_NOTHING_PENDING;
2631                                         lpfc_issue_els_plogi(
2632                                                 vport, ndlp->nlp_DID, 0);
2633                                 } else {
2634                                         __lpfc_sli_rpi_release(vport, ndlp);
2635                                 }
2636                         }
2637                 }
2638         }
2639
2640         mempool_free(pmb, phba->mbox_mem_pool);
2641 }
2642
2643 /**
2644  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2645  * @phba: Pointer to HBA context object.
2646  *
2647  * This function is called with no lock held. This function processes all
2648  * the completed mailbox commands and gives it to upper layers. The interrupt
2649  * service routine processes mailbox completion interrupt and adds completed
2650  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2651  * The worker thread calls lpfc_sli_handle_mb_event, which returns the
2652  * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2653  * function returns the mailbox commands to the upper layer by calling the
2654  * completion handler function of each mailbox.
2655  **/
2656 int
2657 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2658 {
2659         MAILBOX_t *pmbox;
2660         LPFC_MBOXQ_t *pmb;
2661         int rc;
2662         LIST_HEAD(cmplq);
2663
2664         phba->sli.slistat.mbox_event++;
2665
2666         /* Get all completed mailbox buffers into the cmplq */
2667         spin_lock_irq(&phba->hbalock);
2668         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2669         spin_unlock_irq(&phba->hbalock);
2670
2671         /* Get a Mailbox buffer to set up mailbox commands for callback */
2672         do {
2673                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2674                 if (pmb == NULL)
2675                         break;
2676
2677                 pmbox = &pmb->u.mb;
2678
2679                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2680                         if (pmb->vport) {
2681                                 lpfc_debugfs_disc_trc(pmb->vport,
2682                                         LPFC_DISC_TRC_MBOX_VPORT,
2683                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2684                                         (uint32_t)pmbox->mbxCommand,
2685                                         pmbox->un.varWords[0],
2686                                         pmbox->un.varWords[1]);
2687                         }
2688                         else {
2689                                 lpfc_debugfs_disc_trc(phba->pport,
2690                                         LPFC_DISC_TRC_MBOX,
2691                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2692                                         (uint32_t)pmbox->mbxCommand,
2693                                         pmbox->un.varWords[0],
2694                                         pmbox->un.varWords[1]);
2695                         }
2696                 }
2697
2698                 /*
2699                  * It is a fatal error if an unknown mbox command completes.
2700                  */
2701                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2702                     MBX_SHUTDOWN) {
2703                         /* Unknown mailbox command compl */
2704                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2705                                         "(%d):0323 Unknown Mailbox command "
2706                                         "x%x (x%x/x%x) Cmpl\n",
2707                                         pmb->vport ? pmb->vport->vpi :
2708                                         LPFC_VPORT_UNKNOWN,
2709                                         pmbox->mbxCommand,
2710                                         lpfc_sli_config_mbox_subsys_get(phba,
2711                                                                         pmb),
2712                                         lpfc_sli_config_mbox_opcode_get(phba,
2713                                                                         pmb));
2714                         phba->link_state = LPFC_HBA_ERROR;
2715                         phba->work_hs = HS_FFER3;
2716                         lpfc_handle_eratt(phba);
2717                         continue;
2718                 }
2719
2720                 if (pmbox->mbxStatus) {
2721                         phba->sli.slistat.mbox_stat_err++;
2722                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2723                                 /* Mbox cmd cmpl error - RETRYing */
2724                                 lpfc_printf_log(phba, KERN_INFO,
2725                                         LOG_MBOX | LOG_SLI,
2726                                         "(%d):0305 Mbox cmd cmpl "
2727                                         "error - RETRYing Data: x%x "
2728                                         "(x%x/x%x) x%x x%x x%x\n",
2729                                         pmb->vport ? pmb->vport->vpi :
2730                                         LPFC_VPORT_UNKNOWN,
2731                                         pmbox->mbxCommand,
2732                                         lpfc_sli_config_mbox_subsys_get(phba,
2733                                                                         pmb),
2734                                         lpfc_sli_config_mbox_opcode_get(phba,
2735                                                                         pmb),
2736                                         pmbox->mbxStatus,
2737                                         pmbox->un.varWords[0],
2738                                         pmb->vport ? pmb->vport->port_state :
2739                                         LPFC_VPORT_UNKNOWN);
2740                                 pmbox->mbxStatus = 0;
2741                                 pmbox->mbxOwner = OWN_HOST;
2742                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2743                                 if (rc != MBX_NOT_FINISHED)
2744                                         continue;
2745                         }
2746                 }
2747
2748                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2749                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2750                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2751                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2752                                 "x%x x%x x%x\n",
2753                                 pmb->vport ? pmb->vport->vpi : 0,
2754                                 pmbox->mbxCommand,
2755                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2756                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2757                                 pmb->mbox_cmpl,
2758                                 *((uint32_t *) pmbox),
2759                                 pmbox->un.varWords[0],
2760                                 pmbox->un.varWords[1],
2761                                 pmbox->un.varWords[2],
2762                                 pmbox->un.varWords[3],
2763                                 pmbox->un.varWords[4],
2764                                 pmbox->un.varWords[5],
2765                                 pmbox->un.varWords[6],
2766                                 pmbox->un.varWords[7],
2767                                 pmbox->un.varWords[8],
2768                                 pmbox->un.varWords[9],
2769                                 pmbox->un.varWords[10]);
2770
2771                 if (pmb->mbox_cmpl)
2772                         pmb->mbox_cmpl(phba, pmb);
2773         } while (1);
2774         return 0;
2775 }
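
/*
 * Illustrative sketch (hypothetical names, userspace pthreads, not driver
 * code): the splice-then-drain pattern used above. The shared completion
 * queue is stolen in O(1) under the lock, then each entry's completion
 * callback runs with the lock dropped, mirroring how mboxq_cmpl is spliced
 * onto the local cmplq before the mbox_cmpl handlers are invoked.
 */
#include <pthread.h>
#include <stddef.h>

struct cmpl_entry {
	struct cmpl_entry *next;
	void (*cmpl)(struct cmpl_entry *);	/* plays the role of mbox_cmpl */
};

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct cmpl_entry *cmpl_head;		/* plays the role of mboxq_cmpl */

static void drain_completions(void)
{
	struct cmpl_entry *list, *e;

	/* Splice: detach the whole queue while holding the lock. */
	pthread_mutex_lock(&qlock);
	list = cmpl_head;
	cmpl_head = NULL;
	pthread_mutex_unlock(&qlock);

	/* Drain: run completion handlers without holding the lock. */
	while ((e = list) != NULL) {
		list = e->next;
		if (e->cmpl)
			e->cmpl(e);
	}
}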
2776
2777 /**
2778  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2779  * @phba: Pointer to HBA context object.
2780  * @pring: Pointer to driver SLI ring object.
2781  * @tag: buffer tag.
2782  *
2783  * This function is called with no lock held. When the QUE_BUFTAG_BIT
2784  * is set in the tag, the buffer was posted for a particular exchange and
2785  * the function returns the buffer without replacing it.
2786  * If the buffer is for unsolicited ELS or CT traffic, this function
2787  * returns the buffer and also posts another buffer to the firmware.
2788  **/
2789 static struct lpfc_dmabuf *
2790 lpfc_sli_get_buff(struct lpfc_hba *phba,
2791                   struct lpfc_sli_ring *pring,
2792                   uint32_t tag)
2793 {
2794         struct hbq_dmabuf *hbq_entry;
2795
2796         if (tag & QUE_BUFTAG_BIT)
2797                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2798         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2799         if (!hbq_entry)
2800                 return NULL;
2801         return &hbq_entry->dbuf;
2802 }
2803
2804 /**
2805  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2806  * @phba: Pointer to HBA context object.
2807  * @pring: Pointer to driver SLI ring object.
2808  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2809  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2810  * @fch_type: the type for the first frame of the sequence.
2811  *
2812  * This function is called with no lock held. This function uses the r_ctl and
2813  * type of the received sequence to find the correct callback function to call
2814  * to process the sequence.
2815  **/
2816 static int
2817 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2818                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2819                          uint32_t fch_type)
2820 {
2821         int i;
2822
2823         switch (fch_type) {
2824         case FC_TYPE_NVME:
2825                 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2826                 return 1;
2827         default:
2828                 break;
2829         }
2830
2831         /* Unsolicited responses */
2832         if (pring->prt[0].profile) {
2833                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2834                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2835                                                                         saveq);
2836                 return 1;
2837         }
2838         /* We must search, based on rctl / type
2839            for the right routine */
2840         for (i = 0; i < pring->num_mask; i++) {
2841                 if ((pring->prt[i].rctl == fch_r_ctl) &&
2842                     (pring->prt[i].type == fch_type)) {
2843                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2844                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2845                                                 (phba, pring, saveq);
2846                         return 1;
2847                 }
2848         }
2849         return 0;
2850 }
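
/*
 * Illustrative sketch (hypothetical names, not driver code): linear search
 * of a small {rctl, type} -> handler table, as done over pring->prt[] above.
 * A match invokes the registered unsolicited-event handler; no match lets
 * the caller log the unexpected Rctl/Type.
 */
#include <stdint.h>
#include <stddef.h>

struct seq_ctx;					/* opaque received sequence */

struct prt_entry {
	uint32_t rctl;
	uint32_t type;
	void (*rcv_unsol)(struct seq_ctx *);
};

static int dispatch_unsol(const struct prt_entry *prt, size_t num_mask,
			  uint32_t rctl, uint32_t type, struct seq_ctx *seq)
{
	size_t i;

	for (i = 0; i < num_mask; i++) {
		if (prt[i].rctl == rctl && prt[i].type == type) {
			if (prt[i].rcv_unsol)
				prt[i].rcv_unsol(seq);
			return 1;	/* handled */
		}
	}
	return 0;			/* unexpected rctl/type */
}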
2851
2852 /**
2853  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2854  * @phba: Pointer to HBA context object.
2855  * @pring: Pointer to driver SLI ring object.
2856  * @saveq: Pointer to the unsolicited iocb.
2857  *
2858  * This function is called with no lock held by the ring event handler
2859  * when there is an unsolicited iocb posted to the response ring by the
2860  * firmware. This function gets the buffer associated with the iocbs
2861  * and calls the event handler for the ring. This function handles both
2862  * qring buffers and hbq buffers.
2863  * When the function returns 1, the caller can free the iocb object;
2864  * otherwise, upper layer functions will free the iocb objects.
2865  **/
2866 static int
2867 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2868                             struct lpfc_iocbq *saveq)
2869 {
2870         IOCB_t           * irsp;
2871         WORD5            * w5p;
2872         uint32_t           Rctl, Type;
2873         struct lpfc_iocbq *iocbq;
2874         struct lpfc_dmabuf *dmzbuf;
2875
2876         irsp = &(saveq->iocb);
2877
2878         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2879                 if (pring->lpfc_sli_rcv_async_status)
2880                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2881                 else
2882                         lpfc_printf_log(phba,
2883                                         KERN_WARNING,
2884                                         LOG_SLI,
2885                                         "0316 Ring %d handler: unexpected "
2886                                         "ASYNC_STATUS iocb received evt_code "
2887                                         "0x%x\n",
2888                                         pring->ringno,
2889                                         irsp->un.asyncstat.evt_code);
2890                 return 1;
2891         }
2892
2893         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2894                 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2895                 if (irsp->ulpBdeCount > 0) {
2896                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2897                                         irsp->un.ulpWord[3]);
2898                         lpfc_in_buf_free(phba, dmzbuf);
2899                 }
2900
2901                 if (irsp->ulpBdeCount > 1) {
2902                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2903                                         irsp->unsli3.sli3Words[3]);
2904                         lpfc_in_buf_free(phba, dmzbuf);
2905                 }
2906
2907                 if (irsp->ulpBdeCount > 2) {
2908                         dmzbuf = lpfc_sli_get_buff(phba, pring,
2909                                 irsp->unsli3.sli3Words[7]);
2910                         lpfc_in_buf_free(phba, dmzbuf);
2911                 }
2912
2913                 return 1;
2914         }
2915
2916         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2917                 if (irsp->ulpBdeCount != 0) {
2918                         saveq->context2 = lpfc_sli_get_buff(phba, pring,
2919                                                 irsp->un.ulpWord[3]);
2920                         if (!saveq->context2)
2921                                 lpfc_printf_log(phba,
2922                                         KERN_ERR,
2923                                         LOG_SLI,
2924                                         "0341 Ring %d Cannot find buffer for "
2925                                         "an unsolicited iocb. tag 0x%x\n",
2926                                         pring->ringno,
2927                                         irsp->un.ulpWord[3]);
2928                 }
2929                 if (irsp->ulpBdeCount == 2) {
2930                         saveq->context3 = lpfc_sli_get_buff(phba, pring,
2931                                                 irsp->unsli3.sli3Words[7]);
2932                         if (!saveq->context3)
2933                                 lpfc_printf_log(phba,
2934                                         KERN_ERR,
2935                                         LOG_SLI,
2936                                         "0342 Ring %d Cannot find buffer for an"
2937                                         " unsolicited iocb. tag 0x%x\n",
2938                                         pring->ringno,
2939                                         irsp->unsli3.sli3Words[7]);
2940                 }
2941                 list_for_each_entry(iocbq, &saveq->list, list) {
2942                         irsp = &(iocbq->iocb);
2943                         if (irsp->ulpBdeCount != 0) {
2944                                 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2945                                                         irsp->un.ulpWord[3]);
2946                                 if (!iocbq->context2)
2947                                         lpfc_printf_log(phba,
2948                                                 KERN_ERR,
2949                                                 LOG_SLI,
2950                                                 "0343 Ring %d Cannot find "
2951                                                 "buffer for an unsolicited iocb"
2952                                                 ". tag 0x%x\n", pring->ringno,
2953                                                 irsp->un.ulpWord[3]);
2954                         }
2955                         if (irsp->ulpBdeCount == 2) {
2956                                 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2957                                                 irsp->unsli3.sli3Words[7]);
2958                                 if (!iocbq->context3)
2959                                         lpfc_printf_log(phba,
2960                                                 KERN_ERR,
2961                                                 LOG_SLI,
2962                                                 "0344 Ring %d Cannot find "
2963                                                 "buffer for an unsolicited "
2964                                                 "iocb. tag 0x%x\n",
2965                                                 pring->ringno,
2966                                                 irsp->unsli3.sli3Words[7]);
2967                         }
2968                 }
2969         }
2970         if (irsp->ulpBdeCount != 0 &&
2971             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2972              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2973                 int found = 0;
2974
2975                 /* search continue save q for same XRI */
2976                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2977                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2978                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
2979                                 list_add_tail(&saveq->list, &iocbq->list);
2980                                 found = 1;
2981                                 break;
2982                         }
2983                 }
2984                 if (!found)
2985                         list_add_tail(&saveq->clist,
2986                                       &pring->iocb_continue_saveq);
2987                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2988                         list_del_init(&iocbq->clist);
2989                         saveq = iocbq;
2990                         irsp = &(saveq->iocb);
2991                 } else
2992                         return 0;
2993         }
2994         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2995             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2996             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2997                 Rctl = FC_RCTL_ELS_REQ;
2998                 Type = FC_TYPE_ELS;
2999         } else {
3000                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3001                 Rctl = w5p->hcsw.Rctl;
3002                 Type = w5p->hcsw.Type;
3003
3004                 /* Firmware Workaround */
3005                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3006                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3007                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3008                         Rctl = FC_RCTL_ELS_REQ;
3009                         Type = FC_TYPE_ELS;
3010                         w5p->hcsw.Rctl = Rctl;
3011                         w5p->hcsw.Type = Type;
3012                 }
3013         }
3014
3015         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3016                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3017                                 "0313 Ring %d handler: unexpected Rctl x%x "
3018                                 "Type x%x received\n",
3019                                 pring->ringno, Rctl, Type);
3020
3021         return 1;
3022 }
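
/*
 * Illustrative sketch (hypothetical names, not driver code): grouping
 * continuation frames by exchange ID, as done with iocb_continue_saveq
 * above. A frame whose ox_id matches a pending entry is chained onto that
 * entry; an unmatched frame starts a new pending entry.
 */
#include <stdint.h>
#include <stddef.h>

struct frame {
	uint16_t ox_id;			/* exchange identifier */
	struct frame *frames;		/* chain of frames in this exchange */
	struct frame *pending_next;	/* link in the pending list */
};

static struct frame *pending;	/* plays the role of iocb_continue_saveq */

static void save_continuation(struct frame *f)
{
	struct frame *p;

	for (p = pending; p; p = p->pending_next) {
		if (p->ox_id == f->ox_id) {
			/* Same exchange: chain onto the existing entry. */
			f->frames = p->frames;
			p->frames = f;
			return;
		}
	}
	/* New exchange: start a pending entry. */
	f->pending_next = pending;
	pending = f;
}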
3023
3024 /**
3025  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3026  * @phba: Pointer to HBA context object.
3027  * @pring: Pointer to driver SLI ring object.
3028  * @prspiocb: Pointer to response iocb object.
3029  *
3030  * This function looks up the iocb_lookup table to get the command iocb
3031  * corresponding to the given response iocb using the iotag of the
3032  * response iocb. The driver calls this function with the hbalock held
3033  * for SLI3 ports or the ring lock held for SLI4 ports.
3034  * This function returns the command iocb object if it finds the command
3035  * iocb; otherwise, it returns NULL.
3036  **/
3037 static struct lpfc_iocbq *
3038 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3039                       struct lpfc_sli_ring *pring,
3040                       struct lpfc_iocbq *prspiocb)
3041 {
3042         struct lpfc_iocbq *cmd_iocb = NULL;
3043         uint16_t iotag;
3044         spinlock_t *temp_lock = NULL;
3045         unsigned long iflag = 0;
3046
3047         if (phba->sli_rev == LPFC_SLI_REV4)
3048                 temp_lock = &pring->ring_lock;
3049         else
3050                 temp_lock = &phba->hbalock;
3051
3052         spin_lock_irqsave(temp_lock, iflag);
3053         iotag = prspiocb->iocb.ulpIoTag;
3054
3055         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3056                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3057                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3058                         /* remove from txcmpl queue list */
3059                         list_del_init(&cmd_iocb->list);
3060                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3061                         pring->txcmplq_cnt--;
3062                         spin_unlock_irqrestore(temp_lock, iflag);
3063                         return cmd_iocb;
3064                 }
3065         }
3066
3067         spin_unlock_irqrestore(temp_lock, iflag);
3068         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3069                         "0317 iotag x%x is out of "
3070                         "range: max iotag x%x wd0 x%x\n",
3071                         iotag, phba->sli.last_iotag,
3072                         *(((uint32_t *) &prspiocb->iocb) + 7));
3073         return NULL;
3074 }
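
/*
 * Illustrative sketch (hypothetical names, not driver code): the iotag
 * lookup above is a bounds-checked array index, not a search. Each issued
 * command is registered at lookup[iotag], so a response recovers its
 * command in O(1) from the iotag echoed back by the hardware.
 */
#include <stdint.h>
#include <stddef.h>

struct cmd;				/* opaque command object */

struct cmd_table {
	struct cmd **lookup;		/* plays the role of iocbq_lookup */
	uint16_t last_iotag;		/* highest tag handed out */
};

static struct cmd *cmd_from_iotag(const struct cmd_table *t, uint16_t iotag)
{
	if (iotag == 0 || iotag > t->last_iotag)
		return NULL;		/* out-of-range tag: caller logs it */
	return t->lookup[iotag];
}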
3075
3076 /**
3077  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3078  * @phba: Pointer to HBA context object.
3079  * @pring: Pointer to driver SLI ring object.
3080  * @iotag: IOCB tag.
3081  *
3082  * This function looks up the iocb_lookup table to get the command iocb
3083  * corresponding to the given iotag. The driver calls this function with
3084  * the ring lock held because this function is an SLI4 port only helper.
3085  * This function returns the command iocb object if it finds the command
3086  * iocb; otherwise, it returns NULL.
3087  **/
3088 static struct lpfc_iocbq *
3089 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3090                              struct lpfc_sli_ring *pring, uint16_t iotag)
3091 {
3092         struct lpfc_iocbq *cmd_iocb = NULL;
3093         spinlock_t *temp_lock = NULL;
3094         unsigned long iflag = 0;
3095
3096         if (phba->sli_rev == LPFC_SLI_REV4)
3097                 temp_lock = &pring->ring_lock;
3098         else
3099                 temp_lock = &phba->hbalock;
3100
3101         spin_lock_irqsave(temp_lock, iflag);
3102         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3103                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3104                 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3105                         /* remove from txcmpl queue list */
3106                         list_del_init(&cmd_iocb->list);
3107                         cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3108                         pring->txcmplq_cnt--;
3109                         spin_unlock_irqrestore(temp_lock, iflag);
3110                         return cmd_iocb;
3111                 }
3112         }
3113
3114         spin_unlock_irqrestore(temp_lock, iflag);
3115         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3116                         "0372 iotag x%x lookup error: max iotag (x%x) "
3117                         "iocb_flag x%x\n",
3118                         iotag, phba->sli.last_iotag,
3119                         cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3120         return NULL;
3121 }
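
/*
 * Illustrative sketch (hypothetical names, userspace pthreads, not driver
 * code): selecting the lock by hardware revision, as both lookup routines
 * above do (ring_lock on SLI4, hbalock otherwise), so a single code path
 * serves both port generations.
 */
#include <pthread.h>

struct hw_ctx {
	int rev;			/* 4 selects the per-ring lock */
	pthread_mutex_t ring_lock;	/* plays the role of pring->ring_lock */
	pthread_mutex_t hba_lock;	/* plays the role of phba->hbalock */
};

static pthread_mutex_t *lookup_lock(struct hw_ctx *hw)
{
	return (hw->rev == 4) ? &hw->ring_lock : &hw->hba_lock;
}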
3122
3123 /**
3124  * lpfc_sli_process_sol_iocb - process solicited iocb completion
3125  * @phba: Pointer to HBA context object.
3126  * @pring: Pointer to driver SLI ring object.
3127  * @saveq: Pointer to the response iocb to be processed.
3128  *
3129  * This function is called by the ring event handler for non-fcp
3130  * rings when there is a new response iocb in the response ring.
3131  * The caller is not required to hold any locks. This function
3132  * gets the command iocb associated with the response iocb and
3133  * calls the completion handler for the command iocb. If there
3134  * is no completion handler, the function will free the resources
3135  * associated with command iocb. If the response iocb is for
3136  * an already aborted command iocb, the status of the completion
3137  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3138  * This function always returns 1.
3139  **/
3140 static int
3141 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3142                           struct lpfc_iocbq *saveq)
3143 {
3144         struct lpfc_iocbq *cmdiocbp;
3145         int rc = 1;
3146         unsigned long iflag;
3147
3148         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3149         if (cmdiocbp) {
3150                 if (cmdiocbp->iocb_cmpl) {
3151                         /*
3152                          * If an ELS command failed send an event to mgmt
3153                          * application.
3154                          */
3155                         if (saveq->iocb.ulpStatus &&
3156                              (pring->ringno == LPFC_ELS_RING) &&
3157                              (cmdiocbp->iocb.ulpCommand ==
3158                                 CMD_ELS_REQUEST64_CR))
3159                                 lpfc_send_els_failure_event(phba,
3160                                         cmdiocbp, saveq);
3161
3162                         /*
3163                          * Post all ELS completions to the worker thread.
3164                          * All other are passed to the completion callback.
3165                          */
3166                         if (pring->ringno == LPFC_ELS_RING) {
3167                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3168                                     (cmdiocbp->iocb_flag &
3169                                                         LPFC_DRIVER_ABORTED)) {
3170                                         spin_lock_irqsave(&phba->hbalock,
3171                                                           iflag);
3172                                         cmdiocbp->iocb_flag &=
3173                                                 ~LPFC_DRIVER_ABORTED;
3174                                         spin_unlock_irqrestore(&phba->hbalock,
3175                                                                iflag);
3176                                         saveq->iocb.ulpStatus =
3177                                                 IOSTAT_LOCAL_REJECT;
3178                                         saveq->iocb.un.ulpWord[4] =
3179                                                 IOERR_SLI_ABORTED;
3180
3181                                         /* Firmware could still be in progress
3182                                          * of DMAing payload, so don't free data
3183                                          * buffer till after a hbeat.
3184                                          */
3185                                         spin_lock_irqsave(&phba->hbalock,
3186                                                           iflag);
3187                                         saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3188                                         spin_unlock_irqrestore(&phba->hbalock,
3189                                                                iflag);
3190                                 }
3191                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3192                                         if (saveq->iocb_flag &
3193                                             LPFC_EXCHANGE_BUSY) {
3194                                                 /* Set cmdiocb flag for the
3195                                                  * exchange busy so sgl (xri)
3196                                                  * will not be released until
3197                                                  * the abort xri is received
3198                                                  * from hba.
3199                                                  */
3200                                                 spin_lock_irqsave(
3201                                                         &phba->hbalock, iflag);
3202                                                 cmdiocbp->iocb_flag |=
3203                                                         LPFC_EXCHANGE_BUSY;
3204                                                 spin_unlock_irqrestore(
3205                                                         &phba->hbalock, iflag);
3206                                         }
3207                                         if (cmdiocbp->iocb_flag &
3208                                             LPFC_DRIVER_ABORTED) {
3209                                                 /*
3210                                                  * Clear LPFC_DRIVER_ABORTED
3211                                                  * bit in case it was driver
3212                                                  * initiated abort.
3213                                                  */
3214                                                 spin_lock_irqsave(
3215                                                         &phba->hbalock, iflag);
3216                                                 cmdiocbp->iocb_flag &=
3217                                                         ~LPFC_DRIVER_ABORTED;
3218                                                 spin_unlock_irqrestore(
3219                                                         &phba->hbalock, iflag);
3220                                                 cmdiocbp->iocb.ulpStatus =
3221                                                         IOSTAT_LOCAL_REJECT;
3222                                                 cmdiocbp->iocb.un.ulpWord[4] =
3223                                                         IOERR_ABORT_REQUESTED;
3224                                                 /*
3225                                                  * For SLI4, irsiocb contains
3226                                                  * NO_XRI in sli_xritag, it
3227                                                  * shall not affect releasing
3228                                                  * sgl (xri) process.
3229                                                  */
3230                                                 saveq->iocb.ulpStatus =
3231                                                         IOSTAT_LOCAL_REJECT;
3232                                                 saveq->iocb.un.ulpWord[4] =
3233                                                         IOERR_SLI_ABORTED;
3234                                                 spin_lock_irqsave(
3235                                                         &phba->hbalock, iflag);
3236                                                 saveq->iocb_flag |=
3237                                                         LPFC_DELAY_MEM_FREE;
3238                                                 spin_unlock_irqrestore(
3239                                                         &phba->hbalock, iflag);
3240                                         }
3241                                 }
3242                         }
3243                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3244                 } else
3245                         lpfc_sli_release_iocbq(phba, cmdiocbp);
3246         } else {
3247                 /*
3248                  * Unknown initiating command based on the response iotag.
3249                  * This could be the case on the ELS ring because of
3250                  * lpfc_els_abort().
3251                  */
3252                 if (pring->ringno != LPFC_ELS_RING) {
3253                         /*
3254                          * Ring <ringno> handler: unexpected completion IoTag
3255                          * <IoTag>
3256                          */
3257                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3258                                          "0322 Ring %d handler: "
3259                                          "unexpected completion IoTag x%x "
3260                                          "Data: x%x x%x x%x x%x\n",
3261                                          pring->ringno,
3262                                          saveq->iocb.ulpIoTag,
3263                                          saveq->iocb.ulpStatus,
3264                                          saveq->iocb.un.ulpWord[4],
3265                                          saveq->iocb.ulpCommand,
3266                                          saveq->iocb.ulpContext);
3267                 }
3268         }
3269
3270         return rc;
3271 }
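
/*
 * Illustrative sketch (hypothetical names and status codes, not driver
 * code): when a completion arrives for a command the driver itself aborted,
 * the abort flag is cleared and the reported status is rewritten before the
 * callback runs, mirroring the LPFC_DRIVER_ABORTED handling above.
 */
#include <stdint.h>

#define F_DRIVER_ABORTED	0x1	/* plays the role of LPFC_DRIVER_ABORTED */
#define ST_LOCAL_REJECT		0x3	/* plays the role of IOSTAT_LOCAL_REJECT */
#define ERR_SLI_ABORTED		0x9	/* plays the role of IOERR_SLI_ABORTED */

struct io {
	uint32_t flags;
	uint32_t status;
	uint32_t reason;
	void (*cmpl)(struct io *cmd, struct io *rsp);
};

static void complete_cmd(struct io *cmd, struct io *rsp)
{
	if (cmd->flags & F_DRIVER_ABORTED) {
		cmd->flags &= ~F_DRIVER_ABORTED;
		/* Report the abort, not whatever the response carried. */
		rsp->status = ST_LOCAL_REJECT;
		rsp->reason = ERR_SLI_ABORTED;
	}
	if (cmd->cmpl)
		cmd->cmpl(cmd, rsp);
}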
3272
3273 /**
3274  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3275  * @phba: Pointer to HBA context object.
3276  * @pring: Pointer to driver SLI ring object.
3277  *
3278  * This function is called from the iocb ring event handlers when the
3279  * put pointer is ahead of the get pointer for a ring. This function signals
3280  * an error attention condition to the worker thread, and the worker
3281  * thread will transition the HBA to the offline state.
3282  **/
3283 static void
3284 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3285 {
3286         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3287         /*
3288          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3289          * rsp ring <portRspMax>
3290          */
3291         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3292                         "0312 Ring %d handler: portRspPut %d "
3293                         "is bigger than rsp ring %d\n",
3294                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
3295                         pring->sli.sli3.numRiocb);
3296
3297         phba->link_state = LPFC_HBA_ERROR;
3298
3299         /*
3300          * All error attention handlers are posted to
3301          * the worker thread
3302          */
3303         phba->work_ha |= HA_ERATT;
3304         phba->work_hs = HS_FFER3;
3305
3306         lpfc_worker_wake_up(phba);
3307
3308         return;
3309 }
3310
3311 /**
3312  * lpfc_poll_eratt - Error attention polling timer timeout handler
3313  * @t: Pointer to the expired timer_list; the HBA context is derived from it.
3314  *
3315  * This function is invoked by the Error Attention polling timer when the
3316  * timer times out. It will check the SLI Error Attention register for
3317  * possible attention events. If so, it will post an Error Attention event
3318  * and wake up the worker thread to process it. Otherwise, it will set up the
3319  * Error Attention polling timer for the next poll.
3320  **/
3321 void lpfc_poll_eratt(struct timer_list *t)
3322 {
3323         struct lpfc_hba *phba;
3324         uint32_t eratt = 0;
3325         uint64_t sli_intr, cnt;
3326
3327         phba = from_timer(phba, t, eratt_poll);
3328
3329         /* Here we also keep track of interrupts per second for the HBA */
3330         sli_intr = phba->sli.slistat.sli_intr;
3331
3332         if (phba->sli.slistat.sli_prev_intr > sli_intr)
3333                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3334                         sli_intr);
3335         else
3336                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3337
3338         /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3339         do_div(cnt, phba->eratt_poll_interval);
3340         phba->sli.slistat.sli_ips = cnt;
3341
3342         phba->sli.slistat.sli_prev_intr = sli_intr;
3343
3344         /* Check chip HA register for error event */
3345         eratt = lpfc_sli_check_eratt(phba);
3346
3347         if (eratt)
3348                 /* Tell the worker thread there is work to do */
3349                 lpfc_worker_wake_up(phba);
3350         else
3351                 /* Restart the timer for next eratt poll */
3352                 mod_timer(&phba->eratt_poll,
3353                           jiffies +
3354                           msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3355         return;
3356 }
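
/*
 * Illustrative sketch (hypothetical names, not driver code): the
 * interrupts-per-second computation above, written out. Unsigned
 * subtraction handles a counter that wrapped since the last poll; the
 * kernel uses do_div() because 64-bit division is not natively available
 * on 32-bit x86, while plain division is fine in userspace.
 */
#include <stdint.h>

static uint64_t intr_per_sec(uint64_t prev, uint64_t now,
			     uint32_t poll_interval_sec)
{
	uint64_t cnt;

	if (prev > now)			/* counter wrapped since last poll */
		cnt = (UINT64_MAX - prev) + now;
	else
		cnt = now - prev;

	return cnt / poll_interval_sec;	/* do_div(cnt, interval) in-kernel */
}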
3357
3358
3359 /**
3360  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3361  * @phba: Pointer to HBA context object.
3362  * @pring: Pointer to driver SLI ring object.
3363  * @mask: Host attention register mask for this ring.
3364  *
3365  * This function is called from the interrupt context when there is a ring
3366  * event for the fcp ring. The caller does not hold any lock.
3367  * The function processes each response iocb in the response ring until it
3368  * finds an iocb with the LE bit set, chaining all the iocbs up to that one.
3369  * The function calls the completion handler of the command iocb if the
3370  * response iocb indicates a completion for a command iocb or is an abort
3371  * completion, and calls the lpfc_sli_process_unsol_iocb function if this
3372  * is an unsolicited iocb.
3373  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3374  * to check it explicitly.
3375  **/
3376 int
3377 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3378                                 struct lpfc_sli_ring *pring, uint32_t mask)
3379 {
3380         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3381         IOCB_t *irsp = NULL;
3382         IOCB_t *entry = NULL;
3383         struct lpfc_iocbq *cmdiocbq = NULL;
3384         struct lpfc_iocbq rspiocbq;
3385         uint32_t status;
3386         uint32_t portRspPut, portRspMax;
3387         int rc = 1;
3388         lpfc_iocb_type type;
3389         unsigned long iflag;
3390         uint32_t rsp_cmpl = 0;
3391
3392         spin_lock_irqsave(&phba->hbalock, iflag);
3393         pring->stats.iocb_event++;
3394
3395         /*
3396          * The next available response entry should never exceed the maximum
3397          * entries.  If it does, treat it as an adapter hardware error.
3398          */
3399         portRspMax = pring->sli.sli3.numRiocb;
3400         portRspPut = le32_to_cpu(pgp->rspPutInx);
3401         if (unlikely(portRspPut >= portRspMax)) {
3402                 lpfc_sli_rsp_pointers_error(phba, pring);
3403                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3404                 return 1;
3405         }
3406         if (phba->fcp_ring_in_use) {
3407                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3408                 return 1;
3409         } else
3410                 phba->fcp_ring_in_use = 1;
3411
3412         rmb();
3413         while (pring->sli.sli3.rspidx != portRspPut) {
3414                 /*
3415                  * Fetch an entry off the ring and copy it into a local data
3416                  * structure.  The copy involves a byte-swap since the
3417                  * network byte order and pci byte orders are different.
3418                  */
3419                 entry = lpfc_resp_iocb(phba, pring);
3420                 phba->last_completion_time = jiffies;
3421
3422                 if (++pring->sli.sli3.rspidx >= portRspMax)
3423                         pring->sli.sli3.rspidx = 0;
3424
3425                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3426                                       (uint32_t *) &rspiocbq.iocb,
3427                                       phba->iocb_rsp_size);
3428                 INIT_LIST_HEAD(&(rspiocbq.list));
3429                 irsp = &rspiocbq.iocb;
3430
3431                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3432                 pring->stats.iocb_rsp++;
3433                 rsp_cmpl++;
3434
3435                 if (unlikely(irsp->ulpStatus)) {
3436                         /*
3437                          * If resource errors reported from HBA, reduce
3438                          * queuedepths of the SCSI device.
3439                          */
3440                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3441                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3442                              IOERR_NO_RESOURCES)) {
3443                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3444                                 phba->lpfc_rampdown_queue_depth(phba);
3445                                 spin_lock_irqsave(&phba->hbalock, iflag);
3446                         }
3447
3448                         /* Rsp ring <ringno> error: IOCB */
3449                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3450                                         "0336 Rsp Ring %d error: IOCB Data: "
3451                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3452                                         pring->ringno,
3453                                         irsp->un.ulpWord[0],
3454                                         irsp->un.ulpWord[1],
3455                                         irsp->un.ulpWord[2],
3456                                         irsp->un.ulpWord[3],
3457                                         irsp->un.ulpWord[4],
3458                                         irsp->un.ulpWord[5],
3459                                         *(uint32_t *)&irsp->un1,
3460                                         *((uint32_t *)&irsp->un1 + 1));
3461                 }
3462
3463                 switch (type) {
3464                 case LPFC_ABORT_IOCB:
3465                 case LPFC_SOL_IOCB:
3466                         /*
3467                          * Idle exchange closed via ABTS from port.  No iocb
3468                          * resources need to be recovered.
3469                          */
3470                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3471                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3472                                                 "0333 IOCB cmd 0x%x"
3473                                                 " processed. Skipping"
3474                                                 " completion\n",
3475                                                 irsp->ulpCommand);
3476                                 break;
3477                         }
3478
3479                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3480                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3481                                                          &rspiocbq);
3482                         spin_lock_irqsave(&phba->hbalock, iflag);
3483                         if (unlikely(!cmdiocbq))
3484                                 break;
3485                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3486                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3487                         if (cmdiocbq->iocb_cmpl) {
3488                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3489                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3490                                                       &rspiocbq);
3491                                 spin_lock_irqsave(&phba->hbalock, iflag);
3492                         }
3493                         break;
3494                 case LPFC_UNSOL_IOCB:
3495                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3496                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3497                         spin_lock_irqsave(&phba->hbalock, iflag);
3498                         break;
3499                 default:
3500                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3501                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3502                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3503                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3504                                        MAX_MSG_DATA);
3505                                 dev_warn(&((phba->pcidev)->dev),
3506                                          "lpfc%d: %s\n",
3507                                          phba->brd_no, adaptermsg);
3508                         } else {
3509                                 /* Unknown IOCB command */
3510                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3511                                                 "0334 Unknown IOCB command "
3512                                                 "Data: x%x, x%x x%x x%x x%x\n",
3513                                                 type, irsp->ulpCommand,
3514                                                 irsp->ulpStatus,
3515                                                 irsp->ulpIoTag,
3516                                                 irsp->ulpContext);
3517                         }
3518                         break;
3519                 }
3520
3521                 /*
3522                  * The response IOCB has been processed.  Update the ring
3523                  * pointer in SLIM.  If the port response put pointer has not
3524                  * been updated, sync the pgp->rspPutInx and fetch the new port
3525                  * response put pointer.
3526                  */
3527                 writel(pring->sli.sli3.rspidx,
3528                         &phba->host_gp[pring->ringno].rspGetInx);
3529
3530                 if (pring->sli.sli3.rspidx == portRspPut)
3531                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3532         }
3533
3534         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3535                 pring->stats.iocb_rsp_full++;
3536                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3537                 writel(status, phba->CAregaddr);
3538                 readl(phba->CAregaddr);
3539         }
3540         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3541                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3542                 pring->stats.iocb_cmd_empty++;
3543
3544                 /* Force update of the local copy of cmdGetInx */
3545                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3546                 lpfc_sli_resume_iocb(phba, pring);
3547
3548                 if ((pring->lpfc_sli_cmd_available))
3549                         (pring->lpfc_sli_cmd_available) (phba, pring);
3550
3551         }
3552
3553         phba->fcp_ring_in_use = 0;
3554         spin_unlock_irqrestore(&phba->hbalock, iflag);
3555         return rc;
3556 }
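
/*
 * Illustrative sketch (hypothetical names, not driver code): the consumer
 * side of the response ring above. Each entry is copied out of the shared
 * ring before processing, the get index advances modulo the ring size and
 * is published back so the port can reuse the slot, and the put index is
 * re-sampled when the loop catches up, as done with rspPutInx above.
 */
#include <stdint.h>
#include <string.h>

#define RING_ENTRIES	64

struct rsp_entry { uint32_t word[8]; };

struct ring {
	volatile struct rsp_entry slot[RING_ENTRIES];	/* port-written */
	volatile uint32_t put;		/* plays the role of rspPutInx */
	uint32_t get;			/* plays the role of rspidx */
};

static void consume_ring(struct ring *r,
			 void (*handle)(const struct rsp_entry *))
{
	struct rsp_entry local;
	uint32_t put = r->put;

	while (r->get != put) {
		/* Copy to a host-owned buffer, then release the slot. */
		memcpy(&local, (const void *)&r->slot[r->get], sizeof(local));
		if (++r->get >= RING_ENTRIES)
			r->get = 0;
		handle(&local);
		if (r->get == put)	/* re-sample: port may have produced more */
			put = r->put;
	}
}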
3557
3558 /**
3559  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3560  * @phba: Pointer to HBA context object.
3561  * @pring: Pointer to driver SLI ring object.
3562  * @rspiocbp: Pointer to driver response IOCB object.
3563  *
3564  * This function is called from the worker thread when there is a slow-path
3565  * response IOCB to process. This function chains all the response iocbs until
3566  * seeing the iocb with the LE bit set. The function will call
3567  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3568  * completion of a command iocb. The function will call the
3569  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3570  * The function frees the resources or calls the completion handler if this
3571  * iocb is an abort completion. The function returns NULL when the response
3572  * iocb has the LE bit set and all the chained iocbs are processed; otherwise
3573  * it chains the iocb onto the iocb_continueq and returns the
3574  * response iocb passed in.
3575  **/
3576 static struct lpfc_iocbq *
3577 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3578                         struct lpfc_iocbq *rspiocbp)
3579 {
3580         struct lpfc_iocbq *saveq;
3581         struct lpfc_iocbq *cmdiocbp;
3582         struct lpfc_iocbq *next_iocb;
3583         IOCB_t *irsp = NULL;
3584         uint32_t free_saveq;
3585         uint8_t iocb_cmd_type;
3586         lpfc_iocb_type type;
3587         unsigned long iflag;
3588         int rc;
3589
3590         spin_lock_irqsave(&phba->hbalock, iflag);
3591         /* First add the response iocb to the iocb_continueq list */
3592         list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3593         pring->iocb_continueq_cnt++;
3594
3595         /* Now, determine whether the list is completed for processing */
3596         irsp = &rspiocbp->iocb;
3597         if (irsp->ulpLe) {
3598                 /*
3599                  * By default, the driver expects to free all resources
3600                  * associated with this iocb completion.
3601                  */
3602                 free_saveq = 1;
3603                 saveq = list_get_first(&pring->iocb_continueq,
3604                                        struct lpfc_iocbq, list);
3605                 irsp = &(saveq->iocb);
3606                 list_del_init(&pring->iocb_continueq);
3607                 pring->iocb_continueq_cnt = 0;
3608
3609                 pring->stats.iocb_rsp++;
3610
3611                 /*
3612                  * If resource errors reported from HBA, reduce
3613                  * queuedepths of the SCSI device.
3614                  */
3615                 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3616                     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3617                      IOERR_NO_RESOURCES)) {
3618                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3619                         phba->lpfc_rampdown_queue_depth(phba);
3620                         spin_lock_irqsave(&phba->hbalock, iflag);
3621                 }
3622
3623                 if (irsp->ulpStatus) {
3624                         /* Rsp ring <ringno> error: IOCB */
3625                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3626                                         "0328 Rsp Ring %d error: "
3627                                         "IOCB Data: "
3628                                         "x%x x%x x%x x%x "
3629                                         "x%x x%x x%x x%x "
3630                                         "x%x x%x x%x x%x "
3631                                         "x%x x%x x%x x%x\n",
3632                                         pring->ringno,
3633                                         irsp->un.ulpWord[0],
3634                                         irsp->un.ulpWord[1],
3635                                         irsp->un.ulpWord[2],
3636                                         irsp->un.ulpWord[3],
3637                                         irsp->un.ulpWord[4],
3638                                         irsp->un.ulpWord[5],
3639                                         *(((uint32_t *) irsp) + 6),
3640                                         *(((uint32_t *) irsp) + 7),
3641                                         *(((uint32_t *) irsp) + 8),
3642                                         *(((uint32_t *) irsp) + 9),
3643                                         *(((uint32_t *) irsp) + 10),
3644                                         *(((uint32_t *) irsp) + 11),
3645                                         *(((uint32_t *) irsp) + 12),
3646                                         *(((uint32_t *) irsp) + 13),
3647                                         *(((uint32_t *) irsp) + 14),
3648                                         *(((uint32_t *) irsp) + 15));
3649                 }
3650
3651                 /*
3652                  * Fetch the IOCB command type and call the correct completion
3653                  * routine. Solicited and Unsolicited IOCBs on the ELS ring
3654                  * get freed back to the lpfc_iocb_list by the discovery
3655                  * kernel thread.
3656                  */
3657                 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3658                 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3659                 switch (type) {
3660                 case LPFC_SOL_IOCB:
3661                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3662                         rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3663                         spin_lock_irqsave(&phba->hbalock, iflag);
3664                         break;
3665
3666                 case LPFC_UNSOL_IOCB:
3667                         spin_unlock_irqrestore(&phba->hbalock, iflag);
3668                         rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3669                         spin_lock_irqsave(&phba->hbalock, iflag);
3670                         if (!rc)
3671                                 free_saveq = 0;
3672                         break;
3673
3674                 case LPFC_ABORT_IOCB:
3675                         cmdiocbp = NULL;
3676                         if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3677                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3678                                 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3679                                                                  saveq);
3680                                 spin_lock_irqsave(&phba->hbalock, iflag);
3681                         }
3682                         if (cmdiocbp) {
3683                                 /* Call the specified completion routine */
3684                                 if (cmdiocbp->iocb_cmpl) {
3685                                         spin_unlock_irqrestore(&phba->hbalock,
3686                                                                iflag);
3687                                         (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3688                                                               saveq);
3689                                         spin_lock_irqsave(&phba->hbalock,
3690                                                           iflag);
3691                                 } else
3692                                         __lpfc_sli_release_iocbq(phba,
3693                                                                  cmdiocbp);
3694                         }
3695                         break;
3696
3697                 case LPFC_UNKNOWN_IOCB:
3698                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3699                                 char adaptermsg[LPFC_MAX_ADPTMSG];
3700                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3701                                 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3702                                        MAX_MSG_DATA);
3703                                 dev_warn(&((phba->pcidev)->dev),
3704                                          "lpfc%d: %s\n",
3705                                          phba->brd_no, adaptermsg);
3706                         } else {
3707                                 /* Unknown IOCB command */
3708                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3709                                                 "0335 Unknown IOCB "
3710                                                 "command Data: x%x "
3711                                                 "x%x x%x x%x\n",
3712                                                 irsp->ulpCommand,
3713                                                 irsp->ulpStatus,
3714                                                 irsp->ulpIoTag,
3715                                                 irsp->ulpContext);
3716                         }
3717                         break;
3718                 }
3719
3720                 if (free_saveq) {
3721                         list_for_each_entry_safe(rspiocbp, next_iocb,
3722                                                  &saveq->list, list) {
3723                                 list_del_init(&rspiocbp->list);
3724                                 __lpfc_sli_release_iocbq(phba, rspiocbp);
3725                         }
3726                         __lpfc_sli_release_iocbq(phba, saveq);
3727                 }
3728                 rspiocbp = NULL;
3729         }
3730         spin_unlock_irqrestore(&phba->hbalock, iflag);
3731         return rspiocbp;
3732 }
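
/*
 * Illustrative sketch (hypothetical names, not driver code): collecting
 * response entries on a continuation list until one carries the last-entry
 * (LE) marker, then handing back the whole chain for processing, as done
 * with iocb_continueq and irsp->ulpLe above.
 */
#include <stddef.h>

struct rsp {
	int le;				/* last entry of this completion */
	struct rsp *next;
};

struct cont_q {
	struct rsp *head, *tail;
};

/* Returns the completed chain when rsp carries LE, otherwise NULL. */
static struct rsp *chain_until_le(struct cont_q *q, struct rsp *rsp)
{
	struct rsp *done;

	rsp->next = NULL;
	if (q->tail)
		q->tail->next = rsp;
	else
		q->head = rsp;
	q->tail = rsp;

	if (!rsp->le)
		return NULL;		/* keep accumulating */

	done = q->head;
	q->head = q->tail = NULL;	/* reset for the next sequence */
	return done;
}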
3733
3734 /**
3735  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3736  * @phba: Pointer to HBA context object.
3737  * @pring: Pointer to driver SLI ring object.
3738  * @mask: Host attention register mask for this ring.
3739  *
3740  * This routine wraps the actual slow-ring event handler, invoking it
3741  * through the API jump table function pointer in the lpfc_hba struct.
3742  **/
3743 void
3744 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3745                                 struct lpfc_sli_ring *pring, uint32_t mask)
3746 {
3747         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3748 }
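
/*
 * Illustrative sketch (hypothetical names, not driver code): the jump-table
 * wrapper pattern above. The per-revision handler is presumably bound once
 * during setup, so callers like the wrapper need no SLI3-versus-SLI4
 * branching at each event.
 */
struct hba;

struct hba_ops {
	/* Bound to the _s3 or _s4 variant during initialization. */
	void (*handle_slow_ring_event)(struct hba *hba, unsigned int mask);
};

struct hba {
	struct hba_ops ops;
};

static void handle_slow_ring_event(struct hba *hba, unsigned int mask)
{
	hba->ops.handle_slow_ring_event(hba, mask);	/* one indirect call */
}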
3749
3750 /**
3751  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3752  * @phba: Pointer to HBA context object.
3753  * @pring: Pointer to driver SLI ring object.
3754  * @mask: Host attention register mask for this ring.
3755  *
3756  * This function is called from the worker thread when there is a ring event
3757  * for non-fcp rings. The caller does not hold any lock. The function
3758  * removes each response iocb from the response ring and calls the
3759  * response iocb handler (lpfc_sli_sp_handle_rspiocb) to process it.
3760  **/
3761 static void
3762 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3763                                    struct lpfc_sli_ring *pring, uint32_t mask)
3764 {
3765         struct lpfc_pgp *pgp;
3766         IOCB_t *entry;
3767         IOCB_t *irsp = NULL;
3768         struct lpfc_iocbq *rspiocbp = NULL;
3769         uint32_t portRspPut, portRspMax;
3770         unsigned long iflag;
3771         uint32_t status;
3772
3773         pgp = &phba->port_gp[pring->ringno];
3774         spin_lock_irqsave(&phba->hbalock, iflag);
3775         pring->stats.iocb_event++;
3776
3777         /*
3778          * The next available response entry should never exceed the maximum
3779          * entries.  If it does, treat it as an adapter hardware error.
3780          */
3781         portRspMax = pring->sli.sli3.numRiocb;
3782         portRspPut = le32_to_cpu(pgp->rspPutInx);
3783         if (portRspPut >= portRspMax) {
3784                 /*
3785                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3786                  * rsp ring <portRspMax>
3787                  */
3788                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3789                                 "0303 Ring %d handler: portRspPut %d "
3790                                 "is bigger than rsp ring %d\n",
3791                                 pring->ringno, portRspPut, portRspMax);
3792
3793                 phba->link_state = LPFC_HBA_ERROR;
3794                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3795
3796                 phba->work_hs = HS_FFER3;
3797                 lpfc_handle_eratt(phba);
3798
3799                 return;
3800         }
3801
3802         rmb();
3803         while (pring->sli.sli3.rspidx != portRspPut) {
3804                 /*
3805                  * Build a completion list and call the appropriate handler.
3806                  * The process is to get the next available response iocb, get
3807                  * a free iocb from the list, copy the response data into the
3808                  * free iocb, insert to the continuation list, and update the
3809                  * next response index to slim.  This process makes response
3810                  * iocb's in the ring available to DMA as fast as possible but
3811                  * pays a penalty for a copy operation.  Since the iocb is
3812                  * only 32 bytes, this penalty is considered small relative to
3813                  * the PCI reads for register values and a slim write.  When
3814                  * the ulpLe field is set, the entire Command has been
3815                  * received.
3816                  */
3817                 entry = lpfc_resp_iocb(phba, pring);
3818
3819                 phba->last_completion_time = jiffies;
3820                 rspiocbp = __lpfc_sli_get_iocbq(phba);
3821                 if (rspiocbp == NULL) {
3822                         printk(KERN_ERR "%s: out of buffers! Failing "
3823                                "completion.\n", __func__);
3824                         break;
3825                 }
3826
3827                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3828                                       phba->iocb_rsp_size);
3829                 irsp = &rspiocbp->iocb;
3830
3831                 if (++pring->sli.sli3.rspidx >= portRspMax)
3832                         pring->sli.sli3.rspidx = 0;
3833
3834                 if (pring->ringno == LPFC_ELS_RING) {
3835                         lpfc_debugfs_slow_ring_trc(phba,
3836                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3837                                 *(((uint32_t *) irsp) + 4),
3838                                 *(((uint32_t *) irsp) + 6),
3839                                 *(((uint32_t *) irsp) + 7));
3840                 }
3841
3842                 writel(pring->sli.sli3.rspidx,
3843                         &phba->host_gp[pring->ringno].rspGetInx);
3844
3845                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3846                 /* Handle the response IOCB */
3847                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3848                 spin_lock_irqsave(&phba->hbalock, iflag);
3849
3850                 /*
3851                  * If the port response put pointer has not been updated, sync
3852                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3853                  * response put pointer.
3854                  */
3855                 if (pring->sli.sli3.rspidx == portRspPut) {
3856                         portRspPut = le32_to_cpu(pgp->rspPutInx);
3857                 }
3858         } /* while (pring->sli.sli3.rspidx != portRspPut) */
3859
3860         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3861                 /* At least one response entry has been freed */
3862                 pring->stats.iocb_rsp_full++;
3863                 /* SET RxRE_RSP in Chip Att register */
3864                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3865                 writel(status, phba->CAregaddr);
3866                 readl(phba->CAregaddr); /* flush */
3867         }
3868         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3869                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3870                 pring->stats.iocb_cmd_empty++;
3871
3872                 /* Force update of the local copy of cmdGetInx */
3873                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3874                 lpfc_sli_resume_iocb(phba, pring);
3875
3876                 if ((pring->lpfc_sli_cmd_available))
3877                         (pring->lpfc_sli_cmd_available) (phba, pring);
3878
3879         }
3880
3881         spin_unlock_irqrestore(&phba->hbalock, iflag);
3882         return;
3883 }
3884
3885 /**
3886  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3887  * @phba: Pointer to HBA context object.
3888  * @pring: Pointer to driver SLI ring object.
3889  * @mask: Host attention register mask for this ring.
3890  *
3891  * This function is called from the worker thread when there is a pending
3892  * ELS response iocb on the driver internal slow-path response iocb worker
3893  * queue. The caller does not hold any lock. The function removes each
3894  * response iocb from the response worker queue and calls the handle
3895  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3896  **/
3897 static void
3898 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3899                                    struct lpfc_sli_ring *pring, uint32_t mask)
3900 {
3901         struct lpfc_iocbq *irspiocbq;
3902         struct hbq_dmabuf *dmabuf;
3903         struct lpfc_cq_event *cq_event;
3904         unsigned long iflag;
3905         int count = 0;
3906
3907         spin_lock_irqsave(&phba->hbalock, iflag);
3908         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3909         spin_unlock_irqrestore(&phba->hbalock, iflag);
3910         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3911                 /* Get the response iocb from the head of work queue */
3912                 spin_lock_irqsave(&phba->hbalock, iflag);
3913                 list_remove_head(&phba->sli4_hba.sp_queue_event,
3914                                  cq_event, struct lpfc_cq_event, list);
3915                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3916
3917                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3918                 case CQE_CODE_COMPL_WQE:
3919                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3920                                                  cq_event);
3921                         /* Translate ELS WCQE to response IOCBQ */
3922                         irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3923                                                                    irspiocbq);
3924                         if (irspiocbq)
3925                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
3926                                                            irspiocbq);
3927                         count++;
3928                         break;
3929                 case CQE_CODE_RECEIVE:
3930                 case CQE_CODE_RECEIVE_V1:
3931                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
3932                                               cq_event);
3933                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
3934                         count++;
3935                         break;
3936                 default:
3937                         break;
3938                 }
3939
3940                 /* Limit the number of events to 64 to avoid soft lockups */
3941                 if (count == 64)
3942                         break;
3943         }
3944 }
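
/*
 * Illustrative sketch (not driver code): the event loop above assumes the
 * interrupt path queues slow-path completions roughly like this, adding
 * the event under hbalock and flagging the worker thread. The field names
 * match what is drained above; the actual producer lives in the CQ
 * handling code.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_queue_event);
 *	phba->hba_flag |= HBA_SP_QUEUE_EVT;
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 *	lpfc_worker_wake_up(phba);
 */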
3945
3946 /**
3947  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3948  * @phba: Pointer to HBA context object.
3949  * @pring: Pointer to driver SLI ring object.
3950  *
3951  * This function aborts all iocbs in the given ring and frees all the iocb
3952  * objects in txq. This function issues an abort iocb for all the iocb commands
3953  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3954  * the return of this function. The caller is not required to hold any locks.
3955  **/
3956 void
3957 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3958 {
3959         LIST_HEAD(completions);
3960         struct lpfc_iocbq *iocb, *next_iocb;
3961
3962         if (pring->ringno == LPFC_ELS_RING) {
3963                 lpfc_fabric_abort_hba(phba);
3964         }
3965
3966         /* Error everything on txq and txcmplq
3967          * First do the txq.
3968          */
3969         if (phba->sli_rev >= LPFC_SLI_REV4) {
3970                 spin_lock_irq(&pring->ring_lock);
3971                 list_splice_init(&pring->txq, &completions);
3972                 pring->txq_cnt = 0;
3973                 spin_unlock_irq(&pring->ring_lock);
3974
3975                 spin_lock_irq(&phba->hbalock);
3976                 /* Next issue ABTS for everything on the txcmplq */
3977                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3978                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3979                 spin_unlock_irq(&phba->hbalock);
3980         } else {
3981                 spin_lock_irq(&phba->hbalock);
3982                 list_splice_init(&pring->txq, &completions);
3983                 pring->txq_cnt = 0;
3984
3985                 /* Next issue ABTS for everything on the txcmplq */
3986                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3987                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3988                 spin_unlock_irq(&phba->hbalock);
3989         }
3990
3991         /* Cancel all the IOCBs from the completions list */
3992         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3993                               IOERR_SLI_ABORTED);
3994 }
3995
3996 /**
3997  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3998  * @phba: Pointer to HBA context object.
4000  *
4001  * This function aborts all iocbs in FCP rings and frees all the iocb
4002  * objects in txq. This function issues an abort iocb for all the iocb commands
4003  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4004  * the return of this function. The caller is not required to hold any locks.
4005  **/
4006 void
4007 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4008 {
4009         struct lpfc_sli *psli = &phba->sli;
4010         struct lpfc_sli_ring  *pring;
4011         uint32_t i;
4012
4013         /* Look on all the FCP Rings for the iotag */
4014         if (phba->sli_rev >= LPFC_SLI_REV4) {
4015                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4016                         pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4017                         lpfc_sli_abort_iocb_ring(phba, pring);
4018                 }
4019         } else {
4020                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4021                 lpfc_sli_abort_iocb_ring(phba, pring);
4022         }
4023 }
4024
4025 /**
4026  * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4027  * @phba: Pointer to HBA context object.
4028  *
4029  * This function flushes all iocbs in the IO ring and frees all the iocb
4030  * objects in txq and txcmplq. This function will not issue abort iocbs
4031  * for the iocb commands in txcmplq; they will just be returned with
4032  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4033  * slot has been permanently disabled.
4034  **/
4035 void
4036 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4037 {
4038         LIST_HEAD(txq);
4039         LIST_HEAD(txcmplq);
4040         struct lpfc_sli *psli = &phba->sli;
4041         struct lpfc_sli_ring  *pring;
4042         uint32_t i;
4043         struct lpfc_iocbq *piocb, *next_iocb;
4044
4045         spin_lock_irq(&phba->hbalock);
4046         /* Indicate the I/O queues are flushed */
4047         phba->hba_flag |= HBA_IOQ_FLUSH;
4048         spin_unlock_irq(&phba->hbalock);
4049
4050         /* Look on all the FCP Rings for the iotag */
4051         if (phba->sli_rev >= LPFC_SLI_REV4) {
4052                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4053                         pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4054
4055                         spin_lock_irq(&pring->ring_lock);
4056                         /* Retrieve everything on txq */
4057                         list_splice_init(&pring->txq, &txq);
4058                         list_for_each_entry_safe(piocb, next_iocb,
4059                                                  &pring->txcmplq, list)
4060                                 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4061                         /* Retrieve everything on the txcmplq */
4062                         list_splice_init(&pring->txcmplq, &txcmplq);
4063                         pring->txq_cnt = 0;
4064                         pring->txcmplq_cnt = 0;
4065                         spin_unlock_irq(&pring->ring_lock);
4066
4067                         /* Flush the txq */
4068                         lpfc_sli_cancel_iocbs(phba, &txq,
4069                                               IOSTAT_LOCAL_REJECT,
4070                                               IOERR_SLI_DOWN);
4071                         /* Flush the txcmplq */
4072                         lpfc_sli_cancel_iocbs(phba, &txcmplq,
4073                                               IOSTAT_LOCAL_REJECT,
4074                                               IOERR_SLI_DOWN);
4075                 }
4076         } else {
4077                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4078
4079                 spin_lock_irq(&phba->hbalock);
4080                 /* Retrieve everything on txq */
4081                 list_splice_init(&pring->txq, &txq);
4082                 list_for_each_entry_safe(piocb, next_iocb,
4083                                          &pring->txcmplq, list)
4084                         piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4085                 /* Retrieve everything on the txcmplq */
4086                 list_splice_init(&pring->txcmplq, &txcmplq);
4087                 pring->txq_cnt = 0;
4088                 pring->txcmplq_cnt = 0;
4089                 spin_unlock_irq(&phba->hbalock);
4090
4091                 /* Flush the txq */
4092                 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4093                                       IOERR_SLI_DOWN);
4094                 /* Flush the txcmplq */
4095                 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4096                                       IOERR_SLI_DOWN);
4097         }
4098 }
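
/*
 * Illustrative sketch (not driver code): once HBA_IOQ_FLUSH is set above,
 * submission paths are expected to fail new I/O fast rather than queue it
 * behind a dead PCI slot. A submission-side guard would look roughly like:
 *
 *	if (phba->hba_flag & HBA_IOQ_FLUSH)
 *		return -ENODEV;	/* I/O queues are being torn down */
 */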
4099
4100 /**
4101  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4102  * @phba: Pointer to HBA context object.
4103  * @mask: Bit mask to be checked.
4104  *
4105  * This function reads the host status register and compares it
4106  * with the provided bit mask to check if the HBA completed
4107  * the restart. This function will wait in a loop for the
4108  * HBA to complete the restart. If the HBA does not restart within
4109  * 15 iterations, the function will reset the HBA again. The
4110  * function returns 1 when the HBA fails to restart; otherwise it
4111  * returns zero.
4112  **/
4113 static int
4114 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4115 {
4116         uint32_t status;
4117         int i = 0;
4118         int retval = 0;
4119
4120         /* Read the HBA Host Status Register */
4121         if (lpfc_readl(phba->HSregaddr, &status))
4122                 return 1;
4123
4124         /*
4125          * Check status register every 10ms for 5 retries, then every
4126          * 500ms for 5, then every 2.5 sec for 5, then reset the board
4127          * and check every 2.5 sec for 5 more.
4128          * Break out of the loop if errors occurred during init.
4129          */
4130         while (((status & mask) != mask) &&
4131                !(status & HS_FFERM) &&
4132                i++ < 20) {
4133
4134                 if (i <= 5)
4135                         msleep(10);
4136                 else if (i <= 10)
4137                         msleep(500);
4138                 else
4139                         msleep(2500);
4140
4141                 if (i == 15) {
4142                         /* Do post */
4143                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4144                         lpfc_sli_brdrestart(phba);
4145                 }
4146                 /* Read the HBA Host Status Register */
4147                 if (lpfc_readl(phba->HSregaddr, &status)) {
4148                         retval = 1;
4149                         break;
4150                 }
4151         }
4152
4153         /* Check to see if any errors occurred during init */
4154         if ((status & HS_FFERM) || (i >= 20)) {
4155                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4156                                 "2751 Adapter failed to restart, "
4157                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4158                                 status,
4159                                 readl(phba->MBslimaddr + 0xa8),
4160                                 readl(phba->MBslimaddr + 0xac));
4161                 phba->link_state = LPFC_HBA_ERROR;
4162                 retval = 1;
4163         }
4164
4165         return retval;
4166 }
4167
4168 /**
4169  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4170  * @phba: Pointer to HBA context object.
4171  * @mask: Bit mask to be checked.
4172  *
4173  * This function checks the host status register to see if the HBA is
4174  * ready. This function will wait in a loop for the HBA to become ready.
4175  * If the HBA is not ready, the function will reset the HBA PCI
4176  * function again. The function returns 1 when the HBA fails to become
4177  * ready; otherwise it returns zero.
4178  **/
4179 static int
4180 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4181 {
4182         uint32_t status;
4183         int retval = 0;
4184
4185         /* Read the HBA Host Status Register */
4186         status = lpfc_sli4_post_status_check(phba);
4187
4188         if (status) {
4189                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4190                 lpfc_sli_brdrestart(phba);
4191                 status = lpfc_sli4_post_status_check(phba);
4192         }
4193
4194         /* Check to see if any errors occurred during init */
4195         if (status) {
4196                 phba->link_state = LPFC_HBA_ERROR;
4197                 retval = 1;
4198         } else
4199                 phba->sli4_hba.intr_enable = 0;
4200
4201         return retval;
4202 }
4203
4204 /**
4205  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4206  * @phba: Pointer to HBA context object.
4207  * @mask: Bit mask to be checked.
4208  *
4209  * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine,
4210  * invoked through the API jump table function pointer in the lpfc_hba struct.
4211  **/
4212 int
4213 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4214 {
4215         return phba->lpfc_sli_brdready(phba, mask);
4216 }
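
/*
 * Illustrative sketch (assumed wiring; the actual API table setup is done
 * once at attach time elsewhere in the driver): the jump table entry is
 * selected by PCI device group, so callers stay SLI-revision agnostic.
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:	/* SLI-3 capable HBAs */
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:	/* SLI-4 capable HBAs */
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
 *		break;
 *	}
 */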
4217
4218 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4219
4220 /**
4221  * lpfc_reset_barrier - Make HBA ready for HBA reset
4222  * @phba: Pointer to HBA context object.
4223  *
4224  * This function is called before resetting an HBA. This function is called
4225  * with hbalock held and requests HBA to quiesce DMAs before a reset.
4226  **/
4227 void lpfc_reset_barrier(struct lpfc_hba *phba)
4228 {
4229         uint32_t __iomem *resp_buf;
4230         uint32_t __iomem *mbox_buf;
4231         volatile uint32_t mbox;
4232         uint32_t hc_copy, ha_copy, resp_data;
4233         int  i;
4234         uint8_t hdrtype;
4235
4236         lockdep_assert_held(&phba->hbalock);
4237
4238         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4239         if (hdrtype != 0x80 ||
4240             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4241              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4242                 return;
4243
4244         /*
4245          * Tell the other part of the chip to temporarily suspend all
4246          * its DMA activity.
4247          */
4248         resp_buf = phba->MBslimaddr;
4249
4250         /* Disable the error attention */
4251         if (lpfc_readl(phba->HCregaddr, &hc_copy))
4252                 return;
4253         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4254         readl(phba->HCregaddr); /* flush */
4255         phba->link_flag |= LS_IGNORE_ERATT;
4256
4257         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4258                 return;
4259         if (ha_copy & HA_ERATT) {
4260                 /* Clear Chip error bit */
4261                 writel(HA_ERATT, phba->HAregaddr);
4262                 phba->pport->stopped = 1;
4263         }
4264
4265         mbox = 0;
4266         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4267         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4268
4269         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4270         mbox_buf = phba->MBslimaddr;
4271         writel(mbox, mbox_buf);
4272
4273         for (i = 0; i < 50; i++) {
4274                 if (lpfc_readl((resp_buf + 1), &resp_data))
4275                         return;
4276                 if (resp_data != ~(BARRIER_TEST_PATTERN))
4277                         mdelay(1);
4278                 else
4279                         break;
4280         }
4281         resp_data = 0;
4282         if (lpfc_readl((resp_buf + 1), &resp_data))
4283                 return;
4284         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4285                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4286                     phba->pport->stopped)
4287                         goto restore_hc;
4288                 else
4289                         goto clear_errat;
4290         }
4291
4292         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4293         resp_data = 0;
4294         for (i = 0; i < 500; i++) {
4295                 if (lpfc_readl(resp_buf, &resp_data))
4296                         return;
4297                 if (resp_data != mbox)
4298                         mdelay(1);
4299                 else
4300                         break;
4301         }
4302
4303 clear_errat:
4304
4305         while (++i < 500) {
4306                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4307                         return;
4308                 if (!(ha_copy & HA_ERATT))
4309                         mdelay(1);
4310                 else
4311                         break;
4312         }
4313
4314         if (readl(phba->HAregaddr) & HA_ERATT) {
4315                 writel(HA_ERATT, phba->HAregaddr);
4316                 phba->pport->stopped = 1;
4317         }
4318
4319 restore_hc:
4320         phba->link_flag &= ~LS_IGNORE_ERATT;
4321         writel(hc_copy, phba->HCregaddr);
4322         readl(phba->HCregaddr); /* flush */
4323 }
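
/*
 * Summary of the barrier handshake above (descriptive only): the host
 * seeds resp_buf + 1 with BARRIER_TEST_PATTERN, then posts a chip-owned
 * KILL_BOARD mailbox word to SLIM. The chip acknowledges that DMA is
 * quiesced by overwriting the pattern with its one's complement, and the
 * mailbox word flipping back to OWN_HOST confirms the command was
 * consumed. Either poll may time out, in which case any pending error
 * attention is cleared before the HC register is restored.
 */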
4324
4325 /**
4326  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4327  * @phba: Pointer to HBA context object.
4328  *
4329  * This function issues a kill_board mailbox command and waits for
4330  * the error attention interrupt. This function is called for stopping
4331  * the firmware processing. The caller is not required to hold any
4332  * locks. This function calls the lpfc_hba_down_post function to free
4333  * any pending commands after the kill. The function will return 1 when it
4334  * fails to kill the board; otherwise it will return 0.
4335  **/
4336 int
4337 lpfc_sli_brdkill(struct lpfc_hba *phba)
4338 {
4339         struct lpfc_sli *psli;
4340         LPFC_MBOXQ_t *pmb;
4341         uint32_t status;
4342         uint32_t ha_copy;
4343         int retval;
4344         int i = 0;
4345
4346         psli = &phba->sli;
4347
4348         /* Kill HBA */
4349         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4350                         "0329 Kill HBA Data: x%x x%x\n",
4351                         phba->pport->port_state, psli->sli_flag);
4352
4353         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4354         if (!pmb)
4355                 return 1;
4356
4357         /* Disable the error attention */
4358         spin_lock_irq(&phba->hbalock);
4359         if (lpfc_readl(phba->HCregaddr, &status)) {
4360                 spin_unlock_irq(&phba->hbalock);
4361                 mempool_free(pmb, phba->mbox_mem_pool);
4362                 return 1;
4363         }
4364         status &= ~HC_ERINT_ENA;
4365         writel(status, phba->HCregaddr);
4366         readl(phba->HCregaddr); /* flush */
4367         phba->link_flag |= LS_IGNORE_ERATT;
4368         spin_unlock_irq(&phba->hbalock);
4369
4370         lpfc_kill_board(phba, pmb);
4371         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4372         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4373
4374         if (retval != MBX_SUCCESS) {
4375                 if (retval != MBX_BUSY)
4376                         mempool_free(pmb, phba->mbox_mem_pool);
4377                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4378                                 "2752 KILL_BOARD command failed retval %d\n",
4379                                 retval);
4380                 spin_lock_irq(&phba->hbalock);
4381                 phba->link_flag &= ~LS_IGNORE_ERATT;
4382                 spin_unlock_irq(&phba->hbalock);
4383                 return 1;
4384         }
4385
4386         spin_lock_irq(&phba->hbalock);
4387         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4388         spin_unlock_irq(&phba->hbalock);
4389
4390         mempool_free(pmb, phba->mbox_mem_pool);
4391
4392         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4393          * attention every 100ms for 3 seconds. If we don't get ERATT after
4394          * 3 seconds we still set HBA_ERROR state because the status of the
4395          * board is now undefined.
4396          */
4397         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4398                 return 1;
4399         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4400                 mdelay(100);
4401                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4402                         return 1;
4403         }
4404
4405         del_timer_sync(&psli->mbox_tmo);
4406         if (ha_copy & HA_ERATT) {
4407                 writel(HA_ERATT, phba->HAregaddr);
4408                 phba->pport->stopped = 1;
4409         }
4410         spin_lock_irq(&phba->hbalock);
4411         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4412         psli->mbox_active = NULL;
4413         phba->link_flag &= ~LS_IGNORE_ERATT;
4414         spin_unlock_irq(&phba->hbalock);
4415
4416         lpfc_hba_down_post(phba);
4417         phba->link_state = LPFC_HBA_ERROR;
4418
4419         return ha_copy & HA_ERATT ? 0 : 1;
4420 }
4421
4422 /**
4423  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4424  * @phba: Pointer to HBA context object.
4425  *
4426  * This function resets the HBA by writing HC_INITFF to the control
4427  * register. After the HBA resets, this function resets all the iocb ring
4428  * indices. This function disables PCI layer parity checking during
4429  * the reset.
4430  * This function returns 0 always.
4431  * The caller is not required to hold any locks.
4432  **/
4433 int
4434 lpfc_sli_brdreset(struct lpfc_hba *phba)
4435 {
4436         struct lpfc_sli *psli;
4437         struct lpfc_sli_ring *pring;
4438         uint16_t cfg_value;
4439         int i;
4440
4441         psli = &phba->sli;
4442
4443         /* Reset HBA */
4444         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4445                         "0325 Reset HBA Data: x%x x%x\n",
4446                         (phba->pport) ? phba->pport->port_state : 0,
4447                         psli->sli_flag);
4448
4449         /* perform board reset */
4450         phba->fc_eventTag = 0;
4451         phba->link_events = 0;
4452         if (phba->pport) {
4453                 phba->pport->fc_myDID = 0;
4454                 phba->pport->fc_prevDID = 0;
4455         }
4456
4457         /* Turn off parity checking and serr during the physical reset */
4458         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4459                 return -EIO;
4460
4461         pci_write_config_word(phba->pcidev, PCI_COMMAND,
4462                               (cfg_value &
4463                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4464
4465         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4466
4467         /* Now toggle INITFF bit in the Host Control Register */
4468         writel(HC_INITFF, phba->HCregaddr);
4469         mdelay(1);
4470         readl(phba->HCregaddr); /* flush */
4471         writel(0, phba->HCregaddr);
4472         readl(phba->HCregaddr); /* flush */
4473
4474         /* Restore PCI cmd register */
4475         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4476
4477         /* Initialize relevant SLI info */
4478         for (i = 0; i < psli->num_rings; i++) {
4479                 pring = &psli->sli3_ring[i];
4480                 pring->flag = 0;
4481                 pring->sli.sli3.rspidx = 0;
4482                 pring->sli.sli3.next_cmdidx  = 0;
4483                 pring->sli.sli3.local_getidx = 0;
4484                 pring->sli.sli3.cmdidx = 0;
4485                 pring->missbufcnt = 0;
4486         }
4487
4488         phba->link_state = LPFC_WARM_START;
4489         return 0;
4490 }
4491
4492 /**
4493  * lpfc_sli4_brdreset - Reset a sli-4 HBA
4494  * @phba: Pointer to HBA context object.
4495  *
4496  * This function resets a SLI4 HBA. This function disables PCI layer parity
4497  * checking while it resets the device. The caller is not required to hold
4498  * any locks.
4499  *
4500  * This function returns 0 on success else returns negative error code.
4501  **/
4502 int
4503 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4504 {
4505         struct lpfc_sli *psli = &phba->sli;
4506         uint16_t cfg_value;
4507         int rc = 0;
4508
4509         /* Reset HBA */
4510         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4511                         "0295 Reset HBA Data: x%x x%x x%x\n",
4512                         phba->pport->port_state, psli->sli_flag,
4513                         phba->hba_flag);
4514
4515         /* perform board reset */
4516         phba->fc_eventTag = 0;
4517         phba->link_events = 0;
4518         phba->pport->fc_myDID = 0;
4519         phba->pport->fc_prevDID = 0;
4520
4521         spin_lock_irq(&phba->hbalock);
4522         psli->sli_flag &= ~(LPFC_PROCESS_LA);
4523         phba->fcf.fcf_flag = 0;
4524         spin_unlock_irq(&phba->hbalock);
4525
4526         /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4527         if (phba->hba_flag & HBA_FW_DUMP_OP) {
4528                 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4529                 return rc;
4530         }
4531
4532         /* Now physically reset the device */
4533         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4534                         "0389 Performing PCI function reset!\n");
4535
4536         /* Turn off parity checking and serr during the physical reset */
4537         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4538                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4539                                 "3205 PCI read Config failed\n");
4540                 return -EIO;
4541         }
4542
4543         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4544                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4545
4546         /* Perform FCoE PCI function reset before freeing queue memory */
4547         rc = lpfc_pci_function_reset(phba);
4548
4549         /* Restore PCI cmd register */
4550         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4551
4552         return rc;
4553 }
4554
4555 /**
4556  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4557  * @phba: Pointer to HBA context object.
4558  *
4559  * This function is called in the SLI initialization code path to
4560  * restart the HBA. The caller is not required to hold any lock.
4561  * This function writes the MBX_RESTART mailbox command to the SLIM and
4562  * resets the HBA. At the end of the function, it calls the lpfc_hba_down_post
4563  * function to free any pending commands. The function enables
4564  * POST only during the first initialization. The function returns zero.
4565  * The function does not guarantee completion of MBX_RESTART mailbox
4566  * command before the return of this function.
4567  **/
4568 static int
4569 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4570 {
4571         MAILBOX_t *mb;
4572         struct lpfc_sli *psli;
4573         volatile uint32_t word0;
4574         void __iomem *to_slim;
4575         uint32_t hba_aer_enabled;
4576
4577         spin_lock_irq(&phba->hbalock);
4578
4579         /* Take PCIe device Advanced Error Reporting (AER) state */
4580         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4581
4582         psli = &phba->sli;
4583
4584         /* Restart HBA */
4585         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4586                         "0337 Restart HBA Data: x%x x%x\n",
4587                         (phba->pport) ? phba->pport->port_state : 0,
4588                         psli->sli_flag);
4589
4590         word0 = 0;
4591         mb = (MAILBOX_t *) &word0;
4592         mb->mbxCommand = MBX_RESTART;
4593         mb->mbxHc = 1;
4594
4595         lpfc_reset_barrier(phba);
4596
4597         to_slim = phba->MBslimaddr;
4598         writel(*(uint32_t *) mb, to_slim);
4599         readl(to_slim); /* flush */
4600
4601         /* Only skip post after fc_ffinit is completed */
4602         if (phba->pport && phba->pport->port_state)
4603                 word0 = 1;      /* This is really setting up word1 */
4604         else
4605                 word0 = 0;      /* This is really setting up word1 */
4606         to_slim = phba->MBslimaddr + sizeof (uint32_t);
4607         writel(*(uint32_t *) mb, to_slim);
4608         readl(to_slim); /* flush */
4609
4610         lpfc_sli_brdreset(phba);
4611         if (phba->pport)
4612                 phba->pport->stopped = 0;
4613         phba->link_state = LPFC_INIT_START;
4614         phba->hba_flag = 0;
4615         spin_unlock_irq(&phba->hbalock);
4616
4617         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4618         psli->stats_start = ktime_get_seconds();
4619
4620         /* Give the INITFF and Post time to settle. */
4621         mdelay(100);
4622
4623         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4624         if (hba_aer_enabled)
4625                 pci_disable_pcie_error_reporting(phba->pcidev);
4626
4627         lpfc_hba_down_post(phba);
4628
4629         return 0;
4630 }
4631
4632 /**
4633  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4634  * @phba: Pointer to HBA context object.
4635  *
4636  * This function is called in the SLI initialization code path to restart
4637  * a SLI4 HBA. The caller is not required to hold any lock.
4638  * At the end of the function, it calls lpfc_hba_down_post function to
4639  * free any pending commands.
4640  **/
4641 static int
4642 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4643 {
4644         struct lpfc_sli *psli = &phba->sli;
4645         uint32_t hba_aer_enabled;
4646         int rc;
4647
4648         /* Restart HBA */
4649         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4650                         "0296 Restart HBA Data: x%x x%x\n",
4651                         phba->pport->port_state, psli->sli_flag);
4652
4653         /* Take PCIe device Advanced Error Reporting (AER) state */
4654         hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4655
4656         rc = lpfc_sli4_brdreset(phba);
4657         if (rc) {
4658                 phba->link_state = LPFC_HBA_ERROR;
4659                 goto hba_down_queue;
4660         }
4661
4662         spin_lock_irq(&phba->hbalock);
4663         phba->pport->stopped = 0;
4664         phba->link_state = LPFC_INIT_START;
4665         phba->hba_flag = 0;
4666         spin_unlock_irq(&phba->hbalock);
4667
4668         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4669         psli->stats_start = ktime_get_seconds();
4670
4671         /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4672         if (hba_aer_enabled)
4673                 pci_disable_pcie_error_reporting(phba->pcidev);
4674
4675 hba_down_queue:
4676         lpfc_hba_down_post(phba);
4677         lpfc_sli4_queue_destroy(phba);
4678
4679         return rc;
4680 }
4681
4682 /**
4683  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4684  * @phba: Pointer to HBA context object.
4685  *
4686  * This routine wraps the actual SLI3 or SLI4 HBA restart routine, invoked
4687  * through the API jump table function pointer in the lpfc_hba struct.
4688 **/
4689 int
4690 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4691 {
4692         return phba->lpfc_sli_brdrestart(phba);
4693 }
4694
4695 /**
4696  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4697  * @phba: Pointer to HBA context object.
4698  *
4699  * This function is called after an HBA restart to wait for successful
4700  * restart of the HBA. Successful restart of the HBA is indicated by the
4701  * HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after 150
4702  * polling iterations, the function will restart the HBA again. The function
4703  * returns zero if the HBA successfully restarted, else a negative error code.
4704  **/
4705 int
4706 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4707 {
4708         uint32_t status, i = 0;
4709
4710         /* Read the HBA Host Status Register */
4711         if (lpfc_readl(phba->HSregaddr, &status))
4712                 return -EIO;
4713
4714         /* Check status register to see what current state is */
4715         i = 0;
4716         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4717
4718                 /* Check every 10ms for 10 retries, then every 100ms for 90
4719                  * retries, then every 1 sec for 50 retries, for a total of
4720                  * ~60 seconds, before resetting the board again and checking
4721                  * every 1 sec for 50 more retries. The up to 60 seconds
4722                  * before the board is ready is required for Falcon FIPS
4723                  * zeroization to complete; any board reset in between would
4724                  * restart zeroization and further delay board readiness.
4725                  */
4726                 if (i++ >= 200) {
4727                         /* Adapter failed to init, timeout, status reg
4728                            <status> */
4729                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4730                                         "0436 Adapter failed to init, "
4731                                         "timeout, status reg x%x, "
4732                                         "FW Data: A8 x%x AC x%x\n", status,
4733                                         readl(phba->MBslimaddr + 0xa8),
4734                                         readl(phba->MBslimaddr + 0xac));
4735                         phba->link_state = LPFC_HBA_ERROR;
4736                         return -ETIMEDOUT;
4737                 }
4738
4739                 /* Check to see if any errors occurred during init */
4740                 if (status & HS_FFERM) {
4741                         /* ERROR: During chipset initialization */
4742                         /* Adapter failed to init, chipset, status reg
4743                            <status> */
4744                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4745                                         "0437 Adapter failed to init, "
4746                                         "chipset, status reg x%x, "
4747                                         "FW Data: A8 x%x AC x%x\n", status,
4748                                         readl(phba->MBslimaddr + 0xa8),
4749                                         readl(phba->MBslimaddr + 0xac));
4750                         phba->link_state = LPFC_HBA_ERROR;
4751                         return -EIO;
4752                 }
4753
4754                 if (i <= 10)
4755                         msleep(10);
4756                 else if (i <= 100)
4757                         msleep(100);
4758                 else
4759                         msleep(1000);
4760
4761                 if (i == 150) {
4762                         /* Do post */
4763                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4764                         lpfc_sli_brdrestart(phba);
4765                 }
4766                 /* Read the HBA Host Status Register */
4767                 if (lpfc_readl(phba->HSregaddr, &status))
4768                         return -EIO;
4769         }
4770
4771         /* Check to see if any errors occurred during init */
4772         if (status & HS_FFERM) {
4773                 /* ERROR: During chipset initialization */
4774                 /* Adapter failed to init, chipset, status reg <status> */
4775                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4776                                 "0438 Adapter failed to init, chipset, "
4777                                 "status reg x%x, "
4778                                 "FW Data: A8 x%x AC x%x\n", status,
4779                                 readl(phba->MBslimaddr + 0xa8),
4780                                 readl(phba->MBslimaddr + 0xac));
4781                 phba->link_state = LPFC_HBA_ERROR;
4782                 return -EIO;
4783         }
4784
4785         /* Clear all interrupt enable conditions */
4786         writel(0, phba->HCregaddr);
4787         readl(phba->HCregaddr); /* flush */
4788
4789         /* setup host attn register */
4790         writel(0xffffffff, phba->HAregaddr);
4791         readl(phba->HAregaddr); /* flush */
4792         return 0;
4793 }
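
/*
 * Timing note, derived from the loop above: worst case the poll runs
 * 10 * 10ms + 90 * 100ms + 100 * 1s, i.e. roughly 109 seconds, with the
 * single mid-course board restart issued at iteration 150 (about 59
 * seconds in).
 */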
4794
4795 /**
4796  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4797  *
4798  * This function calculates and returns the number of HBQs required to be
4799  * configured.
4800  **/
4801 int
4802 lpfc_sli_hbq_count(void)
4803 {
4804         return ARRAY_SIZE(lpfc_hbq_defs);
4805 }
4806
4807 /**
4808  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4809  *
4810  * This function adds the number of hbq entries in every HBQ to get
4811  * the total number of hbq entries required for the HBA and returns
4812  * the total count.
4813  **/
4814 static int
4815 lpfc_sli_hbq_entry_count(void)
4816 {
4817         int  hbq_count = lpfc_sli_hbq_count();
4818         int  count = 0;
4819         int  i;
4820
4821         for (i = 0; i < hbq_count; ++i)
4822                 count += lpfc_hbq_defs[i]->entry_count;
4823         return count;
4824 }
4825
4826 /**
4827  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4828  *
4829  * This function calculates amount of memory required for all hbq entries
4830  * to be configured and returns the total memory required.
4831  **/
4832 int
4833 lpfc_sli_hbq_size(void)
4834 {
4835         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4836 }
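
/*
 * Worked example (hypothetical numbers; the real counts come from
 * lpfc_hbq_defs): with two HBQs of, say, 256 and 128 entries and a
 * 16-byte struct lpfc_hbq_entry, the setup code would reserve
 * (256 + 128) * 16 = 6144 bytes for all HBQ entries.
 */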
4837
4838 /**
4839  * lpfc_sli_hbq_setup - configure and initialize HBQs
4840  * @phba: Pointer to HBA context object.
4841  *
4842  * This function is called during the SLI initialization to configure
4843  * all the HBQs and post buffers to the HBQ. The caller is not
4844  * required to hold any locks. This function will return zero if successful
4845  * else it will return negative error code.
4846  **/
4847 static int
4848 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4849 {
4850         int  hbq_count = lpfc_sli_hbq_count();
4851         LPFC_MBOXQ_t *pmb;
4852         MAILBOX_t *pmbox;
4853         uint32_t hbqno;
4854         uint32_t hbq_entry_index;
4855
4856         /* Get a Mailbox buffer to setup mailbox
4857          * commands for HBA initialization
4858          */
4859         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4860
4861         if (!pmb)
4862                 return -ENOMEM;
4863
4864         pmbox = &pmb->u.mb;
4865
4866         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4867         phba->link_state = LPFC_INIT_MBX_CMDS;
4868         phba->hbq_in_use = 1;
4869
4870         hbq_entry_index = 0;
4871         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4872                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4873                 phba->hbqs[hbqno].hbqPutIdx      = 0;
4874                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4875                 phba->hbqs[hbqno].entry_count =
4876                         lpfc_hbq_defs[hbqno]->entry_count;
4877                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4878                         hbq_entry_index, pmb);
4879                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4880
4881                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4882                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4883                            mbxStatus <status>, ring <num> */
4884
4885                         lpfc_printf_log(phba, KERN_ERR,
4886                                         LOG_SLI | LOG_VPORT,
4887                                         "1805 Adapter failed to init. "
4888                                         "Data: x%x x%x x%x\n",
4889                                         pmbox->mbxCommand,
4890                                         pmbox->mbxStatus, hbqno);
4891
4892                         phba->link_state = LPFC_HBA_ERROR;
4893                         mempool_free(pmb, phba->mbox_mem_pool);
4894                         return -ENXIO;
4895                 }
4896         }
4897         phba->hbq_count = hbq_count;
4898
4899         mempool_free(pmb, phba->mbox_mem_pool);
4900
4901         /* Initially populate or replenish the HBQs */
4902         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4903                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4904         return 0;
4905 }
4906
4907 /**
4908  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4909  * @phba: Pointer to HBA context object.
4910  *
4911  * This function is called during SLI-4 initialization to configure the
4912  * ELS HBQ and post receive buffers to it. The caller is not
4913  * required to hold any locks. This function always returns
4914  * zero.
4915  **/
4916 static int
4917 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4918 {
4919         phba->hbq_in_use = 1;
4920         phba->hbqs[LPFC_ELS_HBQ].entry_count =
4921                 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4922         phba->hbq_count = 1;
4923         lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4924         /* Initially populate or replenish the HBQs */
4925         return 0;
4926 }
4927
4928 /**
4929  * lpfc_sli_config_port - Issue config port mailbox command
4930  * @phba: Pointer to HBA context object.
4931  * @sli_mode: sli mode - 2/3
4932  *
4933  * This function is called by the SLI initialization code path
4934  * to issue the config_port mailbox command. This function restarts the
4935  * HBA firmware and issues a config_port mailbox command to configure
4936  * the SLI interface in the SLI mode specified by the sli_mode
4937  * parameter. The caller is not required to hold any locks.
4938  * The function returns 0 if successful, else returns negative error
4939  * code.
4940  **/
4941 int
4942 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4943 {
4944         LPFC_MBOXQ_t *pmb;
4945         uint32_t resetcount = 0, rc = 0, done = 0;
4946
4947         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4948         if (!pmb) {
4949                 phba->link_state = LPFC_HBA_ERROR;
4950                 return -ENOMEM;
4951         }
4952
4953         phba->sli_rev = sli_mode;
4954         while (resetcount < 2 && !done) {
4955                 spin_lock_irq(&phba->hbalock);
4956                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4957                 spin_unlock_irq(&phba->hbalock);
4958                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4959                 lpfc_sli_brdrestart(phba);
4960                 rc = lpfc_sli_chipset_init(phba);
4961                 if (rc)
4962                         break;
4963
4964                 spin_lock_irq(&phba->hbalock);
4965                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4966                 spin_unlock_irq(&phba->hbalock);
4967                 resetcount++;
4968
4969                 /* Call pre CONFIG_PORT mailbox command initialization.  A
4970                  * value of 0 means the call was successful.  Any other
4971                  * nonzero value is a failure, but if ERESTART is returned,
4972                  * the driver may reset the HBA and try again.
4973                  */
4974                 rc = lpfc_config_port_prep(phba);
4975                 if (rc == -ERESTART) {
4976                         phba->link_state = LPFC_LINK_UNKNOWN;
4977                         continue;
4978                 } else if (rc)
4979                         break;
4980
4981                 phba->link_state = LPFC_INIT_MBX_CMDS;
4982                 lpfc_config_port(phba, pmb);
4983                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4984                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4985                                         LPFC_SLI3_HBQ_ENABLED |
4986                                         LPFC_SLI3_CRP_ENABLED |
4987                                         LPFC_SLI3_DSS_ENABLED);
4988                 if (rc != MBX_SUCCESS) {
4989                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4990                                 "0442 Adapter failed to init, mbxCmd x%x "
4991                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4992                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4993                         spin_lock_irq(&phba->hbalock);
4994                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4995                         spin_unlock_irq(&phba->hbalock);
4996                         rc = -ENXIO;
4997                 } else {
4998                         /* Allow asynchronous mailbox command to go through */
4999                         spin_lock_irq(&phba->hbalock);
5000                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5001                         spin_unlock_irq(&phba->hbalock);
5002                         done = 1;
5003
5004                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5005                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
5006                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5007                                         "3110 Port did not grant ASABT\n");
5008                 }
5009         }
5010         if (!done) {
5011                 rc = -EINVAL;
5012                 goto do_prep_failed;
5013         }
5014         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5015                 if (!pmb->u.mb.un.varCfgPort.cMA) {
5016                         rc = -ENXIO;
5017                         goto do_prep_failed;
5018                 }
5019                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5020                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5021                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5022                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5023                                 phba->max_vpi : phba->max_vports;
5024
5025                 } else
5026                         phba->max_vpi = 0;
5027                 phba->fips_level = 0;
5028                 phba->fips_spec_rev = 0;
5029                 if (pmb->u.mb.un.varCfgPort.gdss) {
5030                         phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5031                         phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5032                         phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5033                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5034                                         "2850 Security Crypto Active. FIPS x%d "
5035                                         "(Spec Rev: x%d)",
5036                                         phba->fips_level, phba->fips_spec_rev);
5037                 }
5038                 if (pmb->u.mb.un.varCfgPort.sec_err) {
5039                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5040                                         "2856 Config Port Security Crypto "
5041                                         "Error: x%x ",
5042                                         pmb->u.mb.un.varCfgPort.sec_err);
5043                 }
5044                 if (pmb->u.mb.un.varCfgPort.gerbm)
5045                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5046                 if (pmb->u.mb.un.varCfgPort.gcrp)
5047                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5048
5049                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5050                 phba->port_gp = phba->mbox->us.s3_pgp.port;
5051
5052                 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5053                         if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5054                                 phba->cfg_enable_bg = 0;
5055                                 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5056                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5057                                                 "0443 Adapter did not grant "
5058                                                 "BlockGuard\n");
5059                         }
5060                 }
5061         } else {
5062                 phba->hbq_get = NULL;
5063                 phba->port_gp = phba->mbox->us.s2.port;
5064                 phba->max_vpi = 0;
5065         }
5066 do_prep_failed:
5067         mempool_free(pmb, phba->mbox_mem_pool);
5068         return rc;
5069 }
5070
5071
5072 /**
5073  * lpfc_sli_hba_setup - SLI initialization function
5074  * @phba: Pointer to HBA context object.
5075  *
5076  * This function is the main SLI initialization function. This function
5077  * is called by the HBA initialization code, HBA reset code and HBA
5078  * error attention handler code. Caller is not required to hold any
5079  * locks. This function issues config_port mailbox command to configure
5080  * the SLI, setup iocb rings and HBQ rings. In the end the function
5081  * calls the config_port_post function to issue init_link mailbox
5082  * command and to start the discovery. The function will return zero
5083  * if successful, else it will return negative error code.
5084  **/
5085 int
5086 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5087 {
5088         uint32_t rc;
5089         int  mode = 3, i;
5090         int longs;
5091
5092         switch (phba->cfg_sli_mode) {
5093         case 2:
5094                 if (phba->cfg_enable_npiv) {
5095                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5096                                 "1824 NPIV enabled: Override sli_mode "
5097                                 "parameter (%d) to auto (0).\n",
5098                                 phba->cfg_sli_mode);
5099                         break;
5100                 }
5101                 mode = 2;
5102                 break;
5103         case 0:
5104         case 3:
5105                 break;
5106         default:
5107                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5108                                 "1819 Unrecognized sli_mode parameter: %d.\n",
5109                                 phba->cfg_sli_mode);
5110
5111                 break;
5112         }
5113         phba->fcp_embed_io = 0; /* SLI4 FC support only */
5114
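        /* Try the selected SLI mode first; if CONFIG_PORT is rejected,
         * fall back to the alternate mode (3 -> 2, or 2 -> 3) below
         * before giving up.
         */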
5115         rc = lpfc_sli_config_port(phba, mode);
5116
5117         if (rc && phba->cfg_sli_mode == 3)
5118                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5119                                 "1820 Unable to select SLI-3.  "
5120                                 "Not supported by adapter.\n");
5121         if (rc && mode != 2)
5122                 rc = lpfc_sli_config_port(phba, 2);
5123         else if (rc && mode == 2)
5124                 rc = lpfc_sli_config_port(phba, 3);
5125         if (rc)
5126                 goto lpfc_sli_hba_setup_error;
5127
5128         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5129         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5130                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5131                 if (!rc) {
5132                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5133                                         "2709 This device supports "
5134                                         "Advanced Error Reporting (AER)\n");
5135                         spin_lock_irq(&phba->hbalock);
5136                         phba->hba_flag |= HBA_AER_ENABLED;
5137                         spin_unlock_irq(&phba->hbalock);
5138                 } else {
5139                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5140                                         "2708 This device does not support "
5141                                         "Advanced Error Reporting (AER): %d\n",
5142                                         rc);
5143                         phba->cfg_aer_support = 0;
5144                 }
5145         }
5146
5147         if (phba->sli_rev == 3) {
5148                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5149                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5150         } else {
5151                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5152                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5153                 phba->sli3_options = 0;
5154         }
5155
5156         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5157                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5158                         phba->sli_rev, phba->max_vpi);
5159         rc = lpfc_sli_ring_map(phba);
5160
5161         if (rc)
5162                 goto lpfc_sli_hba_setup_error;
5163
5164         /* Initialize VPIs. */
5165         if (phba->sli_rev == LPFC_SLI_REV3) {
5166                 /*
5167                  * The VPI bitmask and physical ID array are allocated
5168                  * and initialized once only - at driver load.  A port
5169                  * reset doesn't need to reinitialize this memory.
5170                  */
5171                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
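                        /*
                         * Sizing note: the bitmap needs one bit per VPI
                         * (IDs 0..max_vpi), rounded up below to a whole
                         * number of unsigned longs.
                         */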
                        longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
                        phba->vpi_bmask = kcalloc(longs,
                                                  sizeof(unsigned long),
                                                  GFP_KERNEL);
                        if (!phba->vpi_bmask) {
                                rc = -ENOMEM;
                                goto lpfc_sli_hba_setup_error;
                        }

                        phba->vpi_ids = kcalloc(phba->max_vpi + 1,
                                                sizeof(uint16_t),
                                                GFP_KERNEL);
                        if (!phba->vpi_ids) {
                                kfree(phba->vpi_bmask);
                                rc = -ENOMEM;
                                goto lpfc_sli_hba_setup_error;
                        }
                        for (i = 0; i < phba->max_vpi; i++)
                                phba->vpi_ids[i] = i;
                }
        }

        /* Init HBQs */
        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                rc = lpfc_sli_hbq_setup(phba);
                if (rc)
                        goto lpfc_sli_hba_setup_error;
        }
        spin_lock_irq(&phba->hbalock);
        phba->sli.sli_flag |= LPFC_PROCESS_LA;
        spin_unlock_irq(&phba->hbalock);

        rc = lpfc_config_port_post(phba);
        if (rc)
                goto lpfc_sli_hba_setup_error;

        return rc;

lpfc_sli_hba_setup_error:
        phba->link_state = LPFC_HBA_ERROR;
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0445 Firmware initialization failed\n");
        return rc;
}

/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * This function issues a DUMP mailbox command to read config region
 * 23, parses the records in the region, and populates the driver
 * data structures.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_dmabuf *mp;
        struct lpfc_mqe *mqe;
        uint32_t data_length;
        int rc;

        /* Program the default value of vlan_id and fc_map */
        phba->valid_vlan = 0;
        phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
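        /*
         * These defaults seed the FCoE FC_Map (the 0x0E:FC:00 prefix used
         * for fabric-provided MAC addresses) until config region 23
         * supplies fabric values.
         */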

        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;

        mqe = &mboxq->u.mqe;
        if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
                rc = -ENOMEM;
                goto out_free_mboxq;
        }

        mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                        "(%d):2571 Mailbox cmd x%x Status x%x "
                        "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
                        "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
                        "CQ: x%x x%x x%x x%x\n",
                        mboxq->vport ? mboxq->vport->vpi : 0,
                        bf_get(lpfc_mqe_command, mqe),
                        bf_get(lpfc_mqe_status, mqe),
                        mqe->un.mb_words[0], mqe->un.mb_words[1],
                        mqe->un.mb_words[2], mqe->un.mb_words[3],
                        mqe->un.mb_words[4], mqe->un.mb_words[5],
                        mqe->un.mb_words[6], mqe->un.mb_words[7],
                        mqe->un.mb_words[8], mqe->un.mb_words[9],
                        mqe->un.mb_words[10], mqe->un.mb_words[11],
                        mqe->un.mb_words[12], mqe->un.mb_words[13],
                        mqe->un.mb_words[14], mqe->un.mb_words[15],
                        mqe->un.mb_words[16], mqe->un.mb_words[50],
                        mboxq->mcqe.word0,
                        mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
                        mboxq->mcqe.trailer);

        if (rc) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                rc = -EIO;
                goto out_free_mboxq;
        }
        data_length = mqe->un.mb_words[5];
        if (data_length > DMP_RGN23_SIZE) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                rc = -EIO;
                goto out_free_mboxq;
        }

        lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        rc = 0;

out_free_mboxq:
        mempool_free(mboxq, phba->mbox_mem_pool);
        return rc;
}

/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *            On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command.  In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - could not allocate memory.
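 *
 * Minimal caller sketch (illustrative only; the buffer size is the
 * caller's choice and is trimmed to the port's reported VPD length):
 *
 *      uint32_t vpd_size = 1024;
 *      uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *      if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *              ...consume vpd_size bytes of VPD data...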
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
                    uint8_t *vpd, uint32_t *vpd_size)
{
        int rc = 0;
        uint32_t dma_size;
        struct lpfc_dmabuf *dmabuf;
        struct lpfc_mqe *mqe;

        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (!dmabuf)
                return -ENOMEM;

        /*
         * Get a DMA buffer for the vpd data resulting from the READ_REV
         * mailbox command.
         */
        dma_size = *vpd_size;
        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
                                          &dmabuf->phys, GFP_KERNEL);
        if (!dmabuf->virt) {
                kfree(dmabuf);
                return -ENOMEM;
        }

        /*
         * The SLI4 implementation of READ_REV conflicts at word1,
         * bits 31:16 and SLI4 adds vpd functionality not present
         * in SLI3.  This code corrects the conflicts.
         */
        lpfc_read_rev(phba, mboxq);
        mqe = &mboxq->u.mqe;
        mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
        mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
        mqe->un.read_rev.word1 &= 0x0000FFFF;
        bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
        bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        if (rc) {
                dma_free_coherent(&phba->pcidev->dev, dma_size,
                                  dmabuf->virt, dmabuf->phys);
                kfree(dmabuf);
                return -EIO;
        }

        /*
         * The available vpd length cannot be bigger than the
         * DMA buffer passed to the port.  Catch the less than
         * case and update the caller's size.
         */
        if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
                *vpd_size = mqe->un.read_rev.avail_vpd_len;

        memcpy(vpd, dmabuf->virt, *vpd_size);

        dma_free_coherent(&phba->pcidev->dev, dma_size,
                          dmabuf->virt, dmabuf->phys);
        kfree(dmabuf);
        return 0;
}

/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device controller attributes (link
 * type, link number, and BIOS version) for the port this PCI function
 * is attached to.
 *
 * Return codes
 *      0 - successful
 *      otherwise - failed to retrieve controller attributes
 **/
static int
lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
        struct lpfc_controller_attribute *cntl_attr;
        void *virtaddr = NULL;
        uint32_t alloclen, reqlen;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        int rc;

        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;

        /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
        reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
        alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
                        LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
                        LPFC_SLI4_MBX_NEMBED);

        if (alloclen < reqlen) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "3084 Allocated DMA memory size (%d) is "
                                "less than the requested DMA memory size "
                                "(%d)\n", alloclen, reqlen);
                rc = -ENOMEM;
                goto out_free_mboxq;
        }
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        virtaddr = mboxq->sge_array->addr[0];
        mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
        shdr = &mbx_cntl_attr->cfg_shdr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status || rc) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "3085 Mailbox x%x (x%x/x%x) failed, "
                                "rc:x%x, status:x%x, add_status:x%x\n",
                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
                                rc, shdr_status, shdr_add_status);
                rc = -ENXIO;
                goto out_free_mboxq;
        }

        cntl_attr = &mbx_cntl_attr->cntl_attr;
        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
        phba->sli4_hba.lnk_info.lnk_tp =
                bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
        phba->sli4_hba.lnk_info.lnk_no =
                bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);

        memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
        strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
                sizeof(phba->BIOSVersion));

        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
                        phba->sli4_hba.lnk_info.lnk_tp,
                        phba->sli4_hba.lnk_info.lnk_no,
                        phba->BIOSVersion);
out_free_mboxq:
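        /*
         * If the mailbox timed out, the port may still own the command
         * memory; freeing it here could race with a late completion, so
         * cleanup is intentionally skipped in that case.
         */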
        if (rc != MBX_TIMEOUT) {
                if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
                        lpfc_sli4_mbox_cmd_free(phba, mboxq);
                else
                        mempool_free(mboxq, phba->mbox_mem_pool);
        }
        return rc;
}

/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device physical port name that this
 * PCI function is attached to.
 *
 * Return codes
 *      0 - successful
 *      otherwise - failed to retrieve physical port name
 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        struct lpfc_mbx_get_port_name *get_port_name;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        char cport_name = 0;
        int rc;

        /* We assume nothing at this point */
        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
        phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;
        /* obtain link type and link number via READ_CONFIG */
        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
        lpfc_sli4_read_config(phba);
        if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
                goto retrieve_ppname;

        /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
        rc = lpfc_sli4_get_ctl_attr(phba);
        if (rc)
                goto out_free_mboxq;

retrieve_ppname:
        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
                LPFC_MBOX_OPCODE_GET_PORT_NAME,
                sizeof(struct lpfc_mbx_get_port_name) -
                sizeof(struct lpfc_sli4_cfg_mhdr),
                LPFC_SLI4_MBX_EMBED);
        get_port_name = &mboxq->u.mqe.un.get_port_name;
        shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
        bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
        bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
                phba->sli4_hba.lnk_info.lnk_tp);
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status || rc) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "3087 Mailbox x%x (x%x/x%x) failed: "
                                "rc:x%x, status:x%x, add_status:x%x\n",
                                bf_get(lpfc_mqe_command, &mboxq->u.mqe),
                                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
                                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
                                rc, shdr_status, shdr_add_status);
                rc = -ENXIO;
                goto out_free_mboxq;
        }
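        /*
         * The GET_PORT_NAME response packs one name character per link
         * (name0..name3); select the character for this port's link number.
         */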
        switch (phba->sli4_hba.lnk_info.lnk_no) {
        case LPFC_LINK_NUMBER_0:
                cport_name = bf_get(lpfc_mbx_get_port_name_name0,
                                &get_port_name->u.response);
                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
                break;
        case LPFC_LINK_NUMBER_1:
                cport_name = bf_get(lpfc_mbx_get_port_name_name1,
                                &get_port_name->u.response);
                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
                break;
        case LPFC_LINK_NUMBER_2:
                cport_name = bf_get(lpfc_mbx_get_port_name_name2,
                                &get_port_name->u.response);
                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
                break;
        case LPFC_LINK_NUMBER_3:
                cport_name = bf_get(lpfc_mbx_get_port_name_name3,
                                &get_port_name->u.response);
                phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
                break;
        default:
                break;
        }

        if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
                phba->Port[0] = cport_name;
                phba->Port[1] = '\0';
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3091 SLI get port name: %s\n", phba->Port);
        }

out_free_mboxq:
        if (rc != MBX_TIMEOUT) {
                if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
                        lpfc_sli4_mbox_cmd_free(phba, mboxq);
                else
                        mempool_free(mboxq, phba->mbox_mem_pool);
        }
        return rc;
}

/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
        int qidx;
        struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_queue *eq;

        sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
        sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
        if (sli4_hba->nvmels_cq)
                sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
                                           LPFC_QUEUE_REARM);

        if (sli4_hba->hdwq) {
                /* Loop thru all Hardware Queues */
                for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
                        qp = &sli4_hba->hdwq[qidx];
                        /* ARM the corresponding CQ */
                        sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
                                                LPFC_QUEUE_REARM);
                }

                /* Loop thru all IRQ vectors */
                for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
                        eq = sli4_hba->hba_eq_hdl[qidx].eq;
                        /* ARM the corresponding EQ */
                        sli4_hba->sli4_write_eq_db(phba, eq,
                                                   0, LPFC_QUEUE_REARM);
                }
        }

        if (phba->nvmet_support) {
                for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
                        sli4_hba->sli4_write_cq_db(phba,
                                sli4_hba->nvmet_cqset[qidx], 0,
                                LPFC_QUEUE_REARM);
                }
        }
}

/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful.  Nonzero otherwise.
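 *
 * Illustrative use (hypothetical locals, for clarity only):
 *
 *      uint16_t cnt, size;
 *      if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *                                          &cnt, &size))
 *              ...the port can supply cnt extents of size XRIs each...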
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
                               uint16_t *extnt_count, uint16_t *extnt_size)
{
        int rc = 0;
        uint32_t length;
        uint32_t mbox_tmo;
        struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
        LPFC_MBOXQ_t *mbox;

        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;

        /* Find out how many extents are available for this resource type */
        length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
                         length, LPFC_SLI4_MBX_EMBED);

        /* Send an extents count of 0 - the GET doesn't use it. */
        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
                                        LPFC_SLI4_MBX_EMBED);
        if (unlikely(rc)) {
                rc = -EIO;
                goto err_exit;
        }

        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        else {
                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
        }
        if (unlikely(rc)) {
                rc = -EIO;
                goto err_exit;
        }

        rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
        if (bf_get(lpfc_mbox_hdr_status,
                   &rsrc_info->header.cfg_shdr.response)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
                                "2930 Failed to get resource extents "
                                "Status 0x%x Add'l Status 0x%x\n",
                                bf_get(lpfc_mbox_hdr_status,
                                       &rsrc_info->header.cfg_shdr.response),
                                bf_get(lpfc_mbox_hdr_add_status,
                                       &rsrc_info->header.cfg_shdr.response));
                rc = -EIO;
                goto err_exit;
        }

        *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
                              &rsrc_info->u.rsp);
        *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
                             &rsrc_info->u.rsp);

        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "3162 Retrieved extents type-%d from port: count:%d, "
                        "size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
        mempool_free(mbox, phba->mbox_mem_pool);
        return rc;
}

/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   negative errno: An error occurred.
 *   1: Extent count or size has changed.
 *   0: No changes.
 **/
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
        uint16_t curr_ext_cnt, rsrc_ext_cnt;
        uint16_t size_diff, rsrc_ext_size;
        int rc = 0;
        struct lpfc_rsrc_blks *rsrc_entry;
        struct list_head *rsrc_blk_list = NULL;

        size_diff = 0;
        curr_ext_cnt = 0;
        rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
                                            &rsrc_ext_cnt,
                                            &rsrc_ext_size);
        if (unlikely(rc))
                return -EIO;

        switch (type) {
        case LPFC_RSC_TYPE_FCOE_RPI:
                rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_VPI:
                rsrc_blk_list = &phba->lpfc_vpi_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_XRI:
                rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_VFI:
                rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
                break;
        default:
                /*
                 * Unknown extent type: there is no block list to walk,
                 * so return an error rather than dereference NULL below.
                 */
                return -EIO;
        }

        list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
                curr_ext_cnt++;
                if (rsrc_entry->rsrc_size != rsrc_ext_size)
                        size_diff++;
        }

        if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
                rc = 1;

        return rc;
}

/**
 * lpfc_sli4_cfg_post_extnts - Post an SLI4 resource extent allocation request
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of extents to request.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request.  It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents.  It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   negative errno: Error value describes the condition found.
 *   0: if successful
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
                          uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
        int rc = 0;
        uint32_t req_len;
        uint32_t emb_len;
        uint32_t alloc_len, mbox_tmo;

        /* Calculate the total requested length of the dma memory */
        req_len = extnt_cnt * sizeof(uint16_t);
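        /*
         * The port returns one 16-bit base ID per extent, hence two bytes
         * of response space per requested extent.
         */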

        /*
         * Calculate the size of an embedded mailbox.  The uint32_t
         * accounts for the extent-specific word.
         */
        emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
                sizeof(uint32_t);

        /*
         * Presume the allocation and response will fit into an embedded
         * mailbox.  If not true, reconfigure to a non-embedded mailbox.
         */
        *emb = LPFC_SLI4_MBX_EMBED;
        if (req_len > emb_len) {
                req_len = extnt_cnt * sizeof(uint16_t) +
                        sizeof(union lpfc_sli4_cfg_shdr) +
                        sizeof(uint32_t);
                *emb = LPFC_SLI4_MBX_NEMBED;
        }

        alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                                     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
                                     req_len, *emb);
        if (alloc_len < req_len) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2982 Allocated DMA memory size (x%x) is "
                        "less than the requested DMA memory "
                        "size (x%x)\n", alloc_len, req_len);
                return -ENOMEM;
        }
        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
        if (unlikely(rc))
                return -EIO;

        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        else {
                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
        }

        if (unlikely(rc))
                rc = -EIO;
        return rc;
}

/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type:  The resource extent type to allocate.
 *
 * This function allocates the number of elements for the specified
 * resource type.
 **/
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
        bool emb = false;
        uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
        uint16_t rsrc_id, rsrc_start, j, k;
        uint16_t *ids;
        int i, rc;
        unsigned long longs;
        unsigned long *bmask;
        struct lpfc_rsrc_blks *rsrc_blks;
        LPFC_MBOXQ_t *mbox;
        uint32_t length;
        struct lpfc_id_range *id_array = NULL;
        void *virtaddr = NULL;
        struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
        struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
        struct list_head *ext_blk_list;

        rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
                                            &rsrc_cnt,
                                            &rsrc_size);
        if (unlikely(rc))
                return -EIO;

        if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
                        "3009 No available Resource Extents "
                        "for resource type 0x%x: Count: 0x%x, "
                        "Size 0x%x\n", type, rsrc_cnt,
                        rsrc_size);
                return -ENOMEM;
        }

        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
                        "2903 Post resource extents type-0x%x: "
                        "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;

        rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
        if (unlikely(rc)) {
                rc = -EIO;
                goto err_exit;
        }

        /*
         * Figure out where the response is located.  Then get local pointers
         * to the response data.  The port does not guarantee that it will
         * honor the full requested extent count, so update the local
         * variable with the count actually allocated by the port.
         */
        if (emb == LPFC_SLI4_MBX_EMBED) {
                rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
                id_array = &rsrc_ext->u.rsp.id[0];
                rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
        } else {
                virtaddr = mbox->sge_array->addr[0];
                n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
                rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
                id_array = &n_rsrc->id;
        }

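        /*
         * One bitmap bit per resource ID: rsrc_cnt extents of rsrc_size
         * IDs each, rounded up to a whole number of unsigned longs.
         */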
        longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
        rsrc_id_cnt = rsrc_cnt * rsrc_size;

        /*
         * Based on the resource size and count, correct the base and max
         * resource values.
         */
        length = sizeof(struct lpfc_rsrc_blks);
        switch (type) {
        case LPFC_RSC_TYPE_FCOE_RPI:
                phba->sli4_hba.rpi_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.rpi_bmask)) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
                phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
                                                 sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.rpi_ids)) {
                        kfree(phba->sli4_hba.rpi_bmask);
                        rc = -ENOMEM;
                        goto err_exit;
                }

                /*
                 * The next_rpi was initialized with the maximum available
                 * count but the port may allocate a smaller number.  Catch
                 * that case and update the next_rpi.
                 */
                phba->sli4_hba.next_rpi = rsrc_id_cnt;

                /* Initialize local ptrs for common extent processing later. */
                bmask = phba->sli4_hba.rpi_bmask;
                ids = phba->sli4_hba.rpi_ids;
                ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_VPI:
                phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
                                          GFP_KERNEL);
                if (unlikely(!phba->vpi_bmask)) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
                phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
                                         GFP_KERNEL);
                if (unlikely(!phba->vpi_ids)) {
                        kfree(phba->vpi_bmask);
                        rc = -ENOMEM;
                        goto err_exit;
                }

                /* Initialize local ptrs for common extent processing later. */
                bmask = phba->vpi_bmask;
                ids = phba->vpi_ids;
                ext_blk_list = &phba->lpfc_vpi_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_XRI:
                phba->sli4_hba.xri_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.xri_bmask)) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
                phba->sli4_hba.max_cfg_param.xri_used = 0;
                phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
                                                 sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.xri_ids)) {
                        kfree(phba->sli4_hba.xri_bmask);
                        rc = -ENOMEM;
                        goto err_exit;
                }

                /* Initialize local ptrs for common extent processing later. */
                bmask = phba->sli4_hba.xri_bmask;
                ids = phba->sli4_hba.xri_ids;
                ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
                break;
        case LPFC_RSC_TYPE_FCOE_VFI:
                phba->sli4_hba.vfi_bmask = kcalloc(longs,
                                                   sizeof(unsigned long),
                                                   GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.vfi_bmask)) {
                        rc = -ENOMEM;
                        goto err_exit;
                }
                phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
                                                 sizeof(uint16_t),
                                                 GFP_KERNEL);
                if (unlikely(!phba->sli4_hba.vfi_ids)) {
                        kfree(phba->sli4_hba.vfi_bmask);
                        rc = -ENOMEM;
                        goto err_exit;
                }

                /* Initialize local ptrs for common extent processing later. */
                bmask = phba->sli4_hba.vfi_bmask;
                ids = phba->sli4_hba.vfi_ids;
                ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
                break;
        default:
                /* Unsupported Opcode.  Fail the call with an error. */
                id_array = NULL;
                bmask = NULL;
                ids = NULL;
                ext_blk_list = NULL;
                rc = -EIO;
                goto err_exit;
        }

        /*
         * Complete initializing the extent configuration with the
         * allocated ids assigned to this function.  The bitmask serves
         * as an index into the array and manages the available ids.  The
         * array just stores the ids communicated to the port via the wqes.
         */
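        /*
         * Each lpfc_id_range word packs two 16-bit base IDs: even i takes
         * the low half, odd i the high half, after which k advances to the
         * next response word.
         */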
        for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
                if ((i % 2) == 0)
                        rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
                                         &id_array[k]);
                else
                        rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
                                         &id_array[k]);

                rsrc_blks = kzalloc(length, GFP_KERNEL);
                if (unlikely(!rsrc_blks)) {
                        rc = -ENOMEM;
                        kfree(bmask);
                        kfree(ids);
                        goto err_exit;
                }
                rsrc_blks->rsrc_start = rsrc_id;
                rsrc_blks->rsrc_size = rsrc_size;
                list_add_tail(&rsrc_blks->list, ext_blk_list);
                rsrc_start = rsrc_id;
                if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
                        phba->sli4_hba.io_xri_start = rsrc_start +
                                lpfc_sli4_get_iocb_cnt(phba);
                }

                while (rsrc_id < (rsrc_start + rsrc_size)) {
                        ids[j] = rsrc_id;
                        rsrc_id++;
                        j++;
                }
                /* Entire word processed.  Get next word. */
                if ((i % 2) == 1)
                        k++;
        }
 err_exit:
        lpfc_sli4_mbox_cmd_free(phba, mbox);
        return rc;
}

/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range.  It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
        int rc;
        uint32_t length, mbox_tmo = 0;
        LPFC_MBOXQ_t *mbox;
        struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
        struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

        mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;

        /*
         * This function sends an embedded mailbox because it only sends
         * the resource type.  All extents of this type are released by
         * the port.
         */
        length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
                         length, LPFC_SLI4_MBX_EMBED);

        /* Send an extents count of 0 - the dealloc doesn't use it. */
        rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
                                        LPFC_SLI4_MBX_EMBED);
        if (unlikely(rc)) {
                rc = -EIO;
                goto out_free_mbox;
        }
        if (!phba->sli4_hba.intr_enable)
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        else {
                mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
                rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
        }
        if (unlikely(rc)) {
                rc = -EIO;
                goto out_free_mbox;
        }

        dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
        if (bf_get(lpfc_mbox_hdr_status,
                   &dealloc_rsrc->header.cfg_shdr.response)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
                                "2919 Failed to release resource extents "
                                "for type %d - Status 0x%x Add'l Status 0x%x. "
                                "Resource memory not released.\n",
                                type,
                                bf_get(lpfc_mbox_hdr_status,
                                    &dealloc_rsrc->header.cfg_shdr.response),
                                bf_get(lpfc_mbox_hdr_add_status,
                                    &dealloc_rsrc->header.cfg_shdr.response));
                rc = -EIO;
                goto out_free_mbox;
        }

        /* Release kernel memory resources for the specific type. */
        switch (type) {
        case LPFC_RSC_TYPE_FCOE_VPI:
                kfree(phba->vpi_bmask);
                kfree(phba->vpi_ids);
                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
                                    &phba->lpfc_vpi_blk_list, list) {
                        list_del_init(&rsrc_blk->list);
                        kfree(rsrc_blk);
                }
                phba->sli4_hba.max_cfg_param.vpi_used = 0;
                break;
        case LPFC_RSC_TYPE_FCOE_XRI:
                kfree(phba->sli4_hba.xri_bmask);
                kfree(phba->sli4_hba.xri_ids);
                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
                                    &phba->sli4_hba.lpfc_xri_blk_list, list) {
                        list_del_init(&rsrc_blk->list);
                        kfree(rsrc_blk);
                }
                break;
        case LPFC_RSC_TYPE_FCOE_VFI:
                kfree(phba->sli4_hba.vfi_bmask);
                kfree(phba->sli4_hba.vfi_ids);
                bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
                                    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
                        list_del_init(&rsrc_blk->list);
                        kfree(rsrc_blk);
                }
                break;
        case LPFC_RSC_TYPE_FCOE_RPI:
                /* RPI bitmask and physical id array are cleaned up earlier. */
                list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
                                    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
                        list_del_init(&rsrc_blk->list);
                        kfree(rsrc_blk);
                }
                break;
        default:
                break;
        }

        bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
        mempool_free(mbox, phba->mbox_mem_pool);
        return rc;
}

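/**
 * lpfc_set_features - Build an embedded SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: pointer to the caller's allocated mailbox structure.
 * @feature: feature to configure (LPFC_SET_UE_RECOVERY or LPFC_SET_MDS_DIAGS).
 *
 * Prepares @mbox as an embedded SLI4 SET_FEATURES request; the caller is
 * responsible for issuing the mailbox and checking its completion status.
 **/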
static void
lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
                  uint32_t feature)
{
        uint32_t len;

        len = sizeof(struct lpfc_mbx_set_feature) -
                sizeof(struct lpfc_sli4_cfg_mhdr);
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_SET_FEATURES, len,
                         LPFC_SLI4_MBX_EMBED);

        switch (feature) {
        case LPFC_SET_UE_RECOVERY:
                bf_set(lpfc_mbx_set_feature_UER,
                       &mbox->u.mqe.un.set_feature, 1);
                mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
                mbox->u.mqe.un.set_feature.param_len = 8;
                break;
        case LPFC_SET_MDS_DIAGS:
                bf_set(lpfc_mbx_set_feature_mds,
                       &mbox->u.mqe.un.set_feature, 1);
                bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
                       &mbox->u.mqe.un.set_feature, 1);
                mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
                mbox->u.mqe.un.set_feature.param_len = 8;
                break;
        }

        return;
}

/**
 * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disable FW logging into host memory on the adapter. To
 * be done before reading logs from the host memory.
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

        ras_fwlog->ras_active = false;

        /* Disable FW logging to host memory */
        writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
               phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
}

/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
 * @phba: Pointer to HBA context object.
 *
 * This function is called to free memory allocated for RAS FW logging
 * support in the driver.
 **/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
        struct lpfc_dmabuf *dmabuf, *next;

        if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
                list_for_each_entry_safe(dmabuf, next,
                                    &ras_fwlog->fwlog_buff_list,
                                    list) {
                        list_del(&dmabuf->list);
                        dma_free_coherent(&phba->pcidev->dev,
                                          LPFC_RAS_MAX_ENTRY_SIZE,
                                          dmabuf->virt, dmabuf->phys);
                        kfree(dmabuf);
                }
        }

        if (ras_fwlog->lwpd.virt) {
                dma_free_coherent(&phba->pcidev->dev,
                                  sizeof(uint32_t) * 2,
                                  ras_fwlog->lwpd.virt,
                                  ras_fwlog->lwpd.phys);
                ras_fwlog->lwpd.virt = NULL;
        }

        ras_fwlog->ras_active = false;
}

/**
 * lpfc_sli4_ras_dma_alloc - Allocate memory for FW logging
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data
 * [LWPD] and for the buffers posted to the adapter for FW log updates.
 * The buffer count is calculated from the module parameter
 * ras_fwlog_buffsize; the size of each buffer posted to FW is 64K.
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
                        uint32_t fwlog_buff_count)
{
        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
        struct lpfc_dmabuf *dmabuf;
        int rc = 0, i = 0;

        /* Initialize List */
        INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

        /* Allocate memory for the LWPD */
        ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
                                            sizeof(uint32_t) * 2,
                                            &ras_fwlog->lwpd.phys,
                                            GFP_KERNEL);
        if (!ras_fwlog->lwpd.virt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6185 LWPD Memory Alloc Failed\n");

                return -ENOMEM;
        }

        ras_fwlog->fw_buffcount = fwlog_buff_count;
        for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
                dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
                                 GFP_KERNEL);
                if (!dmabuf) {
                        rc = -ENOMEM;
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "6186 Memory Alloc failed FW logging\n");
                        goto free_mem;
                }

                dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
                                                  LPFC_RAS_MAX_ENTRY_SIZE,
                                                  &dmabuf->phys, GFP_KERNEL);
                if (!dmabuf->virt) {
                        kfree(dmabuf);
                        rc = -ENOMEM;
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "6187 DMA Alloc Failed FW logging\n");
                        goto free_mem;
                }
                dmabuf->buffer_tag = i;
                list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
        }

free_mem:
        if (rc)
                lpfc_sli4_ras_dma_free(phba);

        return rc;
}

/**
 * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the driver's RAS MBX command to the device.
 **/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb;
        union lpfc_sli4_cfg_shdr *shdr;
        uint32_t shdr_status, shdr_add_status;
        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

        mb = &pmb->u.mb;

        shdr = (union lpfc_sli4_cfg_shdr *)
                &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

        if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                "6188 FW LOG mailbox "
                                "completed with status x%x add_status x%x,"
                                " mbx status x%x\n",
                                shdr_status, shdr_add_status, mb->mbxStatus);

                ras_fwlog->ras_hwsupport = false;
                goto disable_ras;
        }

        ras_fwlog->ras_active = true;
        mempool_free(pmb, phba->mbox_mem_pool);

        return;

disable_ras:
        /* Free RAS DMA memory */
        lpfc_sli4_ras_dma_free(phba);
        mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: Logging verbosity level.
 * @fwlog_enable: Enable/Disable logging.
 *
 * Initialize memory and post mailbox command to enable FW logging in host
 * memory.
 **/
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
                         uint32_t fwlog_level,
                         uint32_t fwlog_enable)
{
        struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
        struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
        struct lpfc_dmabuf *dmabuf;
        LPFC_MBOXQ_t *mbox;
        uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
        int rc = 0;

        fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
                          phba->cfg_ras_fwlog_buffsize);
        fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
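        /*
         * Buffer math: the total log area is cfg_ras_fwlog_buffsize units
         * of LPFC_RAS_MIN_BUFF_POST_SIZE, carved into buffers of
         * LPFC_RAS_MAX_ENTRY_SIZE (64K) that are posted to the adapter.
         */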

        /*
         * If re-enabling FW logging support, use the earlier allocated
         * DMA buffers while posting the MBX command.
         */
        if (!ras_fwlog->lwpd.virt) {
                rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
                if (rc) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                                        "6189 FW Log Memory Allocation Failed\n");
                        return rc;
                }
        }

        /* Setup Mailbox command */
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6190 RAS MBX Alloc Failed\n");
                rc = -ENOMEM;
                goto mem_free;
        }

        ras_fwlog->fw_loglevel = fwlog_level;
        len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
                sizeof(struct lpfc_sli4_cfg_mhdr));

        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
                         LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
                         len, LPFC_SLI4_MBX_EMBED);

        mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
        bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
               fwlog_enable);
        bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
               ras_fwlog->fw_loglevel);
        bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
               ras_fwlog->fw_buffcount);
        bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
               LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
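        /*
         * The per-buffer size above is expressed to the port in
         * SLI4_PAGE_SIZE units.
         */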

        /* Update DMA buffer address */
        list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
                memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);

                mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
                        putPaddrLow(dmabuf->phys);

                mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
                        putPaddrHigh(dmabuf->phys);
        }

        /* Update LWPD address */
        mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
        mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);

        mbox->vport = phba->pport;
        mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;

        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

6460         if (rc == MBX_NOT_FINISHED) {
6461                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6462                                 "6191 FW-Log Mailbox failed. "
6463                                 "status %d mbxStatus : x%x", rc,
6464                                 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6465                 mempool_free(mbox, phba->mbox_mem_pool);
6466                 rc = -EIO;
6467                 goto mem_free;
6468         } else
6469                 rc = 0;
6470 mem_free:
6471         if (rc)
6472                 lpfc_sli4_ras_dma_free(phba);
6473
6474         return rc;
6475 }
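
/*
 * Editor's sketch (illustration only, not part of the driver): the
 * firmware-log sizing at the top of the routine above is plain
 * arithmetic -- the user tunable cfg_ras_fwlog_buffsize scales the
 * minimum post size, and the DMA buffer count is that total divided by
 * the fixed per-buffer entry size.  The constant values below are
 * assumptions for the illustration.
 */
#if 0	/* never compiled */
#define LPFC_RAS_MIN_BUFF_POST_SIZE	(256 * 1024)	/* assumed value */
#define LPFC_RAS_MAX_ENTRY_SIZE		(64 * 1024)	/* assumed value */

static unsigned int ras_fwlog_entry_count(unsigned int cfg_buffsize)
{
	unsigned int fwlog_buffsize;

	fwlog_buffsize = LPFC_RAS_MIN_BUFF_POST_SIZE * cfg_buffsize;
	return fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE; /* DMA buffers needed */
}
#endif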
6476
6477 /**
6478  * lpfc_sli4_ras_setup - Check if RAS is supported on the adapter
6479  * @phba: Pointer to HBA context object.
6480  *
6481  * Check if RAS is supported on the adapter and initialize it.
6482  **/
6483 void
6484 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6485 {
6486         /* Check whether RAS FW logging needs to be enabled */
6487         if (lpfc_check_fwlog_support(phba))
6488                 return;
6489
6490         lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6491                                  LPFC_RAS_ENABLE_LOGGING);
6492 }
6493
6494 /**
6495  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource identifiers.
6496  * @phba: Pointer to HBA context object.
6497  *
6498  * This function allocates all SLI4 resource identifiers.
6499  **/
6500 int
6501 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6502 {
6503         int i, rc, error = 0;
6504         uint16_t count, base;
6505         unsigned long longs;
6506
6507         if (!phba->sli4_hba.rpi_hdrs_in_use)
6508                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6509         if (phba->sli4_hba.extents_in_use) {
6510                 /*
6511                  * The port supports resource extents. The XRI, VPI, VFI, RPI
6512                  * resource extent count must be read and allocated before
6513                  * provisioning the resource id arrays.
6514                  */
6515                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6516                     LPFC_IDX_RSRC_RDY) {
6517                         /*
6518                          * Extent-based resources are set - the driver could
6519                          * be in a port reset. Figure out if any corrective
6520                          * actions need to be taken.
6521                          */
6522                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6523                                                  LPFC_RSC_TYPE_FCOE_VFI);
6524                         if (rc != 0)
6525                                 error++;
6526                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6527                                                  LPFC_RSC_TYPE_FCOE_VPI);
6528                         if (rc != 0)
6529                                 error++;
6530                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6531                                                  LPFC_RSC_TYPE_FCOE_XRI);
6532                         if (rc != 0)
6533                                 error++;
6534                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6535                                                  LPFC_RSC_TYPE_FCOE_RPI);
6536                         if (rc != 0)
6537                                 error++;
6538
6539                         /*
6540                          * It's possible that the number of resources
6541                          * provided to this port instance changed between
6542                          * resets.  Detect this condition and reallocate
6543                          * resources.  Otherwise, there is no action.
6544                          */
6545                         if (error) {
6546                                 lpfc_printf_log(phba, KERN_INFO,
6547                                                 LOG_MBOX | LOG_INIT,
6548                                                 "2931 Detected extent resource "
6549                                                 "change.  Reallocating all "
6550                                                 "extents.\n");
6551                                 rc = lpfc_sli4_dealloc_extent(phba,
6552                                                  LPFC_RSC_TYPE_FCOE_VFI);
6553                                 rc = lpfc_sli4_dealloc_extent(phba,
6554                                                  LPFC_RSC_TYPE_FCOE_VPI);
6555                                 rc = lpfc_sli4_dealloc_extent(phba,
6556                                                  LPFC_RSC_TYPE_FCOE_XRI);
6557                                 rc = lpfc_sli4_dealloc_extent(phba,
6558                                                  LPFC_RSC_TYPE_FCOE_RPI);
6559                         } else
6560                                 return 0;
6561                 }
6562
6563                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6564                 if (unlikely(rc))
6565                         goto err_exit;
6566
6567                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6568                 if (unlikely(rc))
6569                         goto err_exit;
6570
6571                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6572                 if (unlikely(rc))
6573                         goto err_exit;
6574
6575                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6576                 if (unlikely(rc))
6577                         goto err_exit;
6578                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6579                        LPFC_IDX_RSRC_RDY);
6580                 return rc;
6581         } else {
6582                 /*
6583                  * The port does not support resource extents.  The XRI, VPI,
6584                  * VFI, RPI resource ids were determined from READ_CONFIG.
6585                  * Just allocate the bitmasks and provision the resource id
6586                  * arrays.  If a port reset is active, the resources don't
6587                  * need any action - just exit.
6588                  */
6589                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6590                     LPFC_IDX_RSRC_RDY) {
6591                         lpfc_sli4_dealloc_resource_identifiers(phba);
6592                         lpfc_sli4_remove_rpis(phba);
6593                 }
6594                 /* RPIs. */
6595                 count = phba->sli4_hba.max_cfg_param.max_rpi;
6596                 if (count <= 0) {
6597                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6598                                         "3279 Invalid provisioning of "
6599                                         "rpi:%d\n", count);
6600                         rc = -EINVAL;
6601                         goto err_exit;
6602                 }
6603                 base = phba->sli4_hba.max_cfg_param.rpi_base;
6604                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6605                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6606                                                    sizeof(unsigned long),
6607                                                    GFP_KERNEL);
6608                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6609                         rc = -ENOMEM;
6610                         goto err_exit;
6611                 }
6612                 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6613                                                  GFP_KERNEL);
6614                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6615                         rc = -ENOMEM;
6616                         goto free_rpi_bmask;
6617                 }
6618
6619                 for (i = 0; i < count; i++)
6620                         phba->sli4_hba.rpi_ids[i] = base + i;
6621
6622                 /* VPIs. */
6623                 count = phba->sli4_hba.max_cfg_param.max_vpi;
6624                 if (count <= 0) {
6625                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6626                                         "3280 Invalid provisioning of "
6627                                         "vpi:%d\n", count);
6628                         rc = -EINVAL;
6629                         goto free_rpi_ids;
6630                 }
6631                 base = phba->sli4_hba.max_cfg_param.vpi_base;
6632                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6633                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6634                                           GFP_KERNEL);
6635                 if (unlikely(!phba->vpi_bmask)) {
6636                         rc = -ENOMEM;
6637                         goto free_rpi_ids;
6638                 }
6639                 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6640                                         GFP_KERNEL);
6641                 if (unlikely(!phba->vpi_ids)) {
6642                         rc = -ENOMEM;
6643                         goto free_vpi_bmask;
6644                 }
6645
6646                 for (i = 0; i < count; i++)
6647                         phba->vpi_ids[i] = base + i;
6648
6649                 /* XRIs. */
6650                 count = phba->sli4_hba.max_cfg_param.max_xri;
6651                 if (count <= 0) {
6652                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6653                                         "3281 Invalid provisioning of "
6654                                         "xri:%d\n", count);
6655                         rc = -EINVAL;
6656                         goto free_vpi_ids;
6657                 }
6658                 base = phba->sli4_hba.max_cfg_param.xri_base;
6659                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6660                 phba->sli4_hba.xri_bmask = kcalloc(longs,
6661                                                    sizeof(unsigned long),
6662                                                    GFP_KERNEL);
6663                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6664                         rc = -ENOMEM;
6665                         goto free_vpi_ids;
6666                 }
6667                 phba->sli4_hba.max_cfg_param.xri_used = 0;
6668                 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6669                                                  GFP_KERNEL);
6670                 if (unlikely(!phba->sli4_hba.xri_ids)) {
6671                         rc = -ENOMEM;
6672                         goto free_xri_bmask;
6673                 }
6674
6675                 for (i = 0; i < count; i++)
6676                         phba->sli4_hba.xri_ids[i] = base + i;
6677
6678                 /* VFIs. */
6679                 count = phba->sli4_hba.max_cfg_param.max_vfi;
6680                 if (count <= 0) {
6681                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6682                                         "3282 Invalid provisioning of "
6683                                         "vfi:%d\n", count);
6684                         rc = -EINVAL;
6685                         goto free_xri_ids;
6686                 }
6687                 base = phba->sli4_hba.max_cfg_param.vfi_base;
6688                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6689                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6690                                                    sizeof(unsigned long),
6691                                                    GFP_KERNEL);
6692                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6693                         rc = -ENOMEM;
6694                         goto free_xri_ids;
6695                 }
6696                 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6697                                                  GFP_KERNEL);
6698                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6699                         rc = -ENOMEM;
6700                         goto free_vfi_bmask;
6701                 }
6702
6703                 for (i = 0; i < count; i++)
6704                         phba->sli4_hba.vfi_ids[i] = base + i;
6705
6706                 /*
6707                  * Mark all resources ready.  An HBA reset doesn't need
6708                  * to redo this initialization.
6709                  */
6710                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6711                        LPFC_IDX_RSRC_RDY);
6712                 return 0;
6713         }
6714
6715  free_vfi_bmask:
6716         kfree(phba->sli4_hba.vfi_bmask);
6717         phba->sli4_hba.vfi_bmask = NULL;
6718  free_xri_ids:
6719         kfree(phba->sli4_hba.xri_ids);
6720         phba->sli4_hba.xri_ids = NULL;
6721  free_xri_bmask:
6722         kfree(phba->sli4_hba.xri_bmask);
6723         phba->sli4_hba.xri_bmask = NULL;
6724  free_vpi_ids:
6725         kfree(phba->vpi_ids);
6726         phba->vpi_ids = NULL;
6727  free_vpi_bmask:
6728         kfree(phba->vpi_bmask);
6729         phba->vpi_bmask = NULL;
6730  free_rpi_ids:
6731         kfree(phba->sli4_hba.rpi_ids);
6732         phba->sli4_hba.rpi_ids = NULL;
6733  free_rpi_bmask:
6734         kfree(phba->sli4_hba.rpi_bmask);
6735         phba->sli4_hba.rpi_bmask = NULL;
6736  err_exit:
6737         return rc;
6738 }
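
/*
 * Editor's sketch (illustration only, not part of the driver): each of
 * the four non-extent resource families above (RPI, VPI, XRI, VFI)
 * follows one provisioning pattern -- size a bitmask in whole longs,
 * allocate a parallel id array, and fill it with base + index.  The
 * helper name below is hypothetical.
 */
#if 0	/* never compiled */
static int provision_ids(uint16_t count, uint16_t base,
			 unsigned long **bmask, uint16_t **ids)
{
	unsigned long longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	int i;

	*bmask = kcalloc(longs, sizeof(unsigned long), GFP_KERNEL);
	if (!*bmask)
		return -ENOMEM;
	*ids = kcalloc(count, sizeof(uint16_t), GFP_KERNEL);
	if (!*ids) {
		kfree(*bmask);
		*bmask = NULL;
		return -ENOMEM;
	}
	for (i = 0; i < count; i++)
		(*ids)[i] = base + i;	/* physical id = base + logical index */
	return 0;
}
#endif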
6739
6740 /**
6741  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6742  * @phba: Pointer to HBA context object.
6743  *
6744  * This function frees all SLI4 resource identifiers that were allocated
6745  * by lpfc_sli4_alloc_resource_identifiers().
6746  **/
6747 int
6748 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6749 {
6750         if (phba->sli4_hba.extents_in_use) {
6751                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6752                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6753                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6754                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6755         } else {
6756                 kfree(phba->vpi_bmask);
6757                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6758                 kfree(phba->vpi_ids);
6759                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6760                 kfree(phba->sli4_hba.xri_bmask);
6761                 kfree(phba->sli4_hba.xri_ids);
6762                 kfree(phba->sli4_hba.vfi_bmask);
6763                 kfree(phba->sli4_hba.vfi_ids);
6764                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6765                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6766         }
6767
6768         return 0;
6769 }
6770
6771 /**
6772  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6773  * @phba: Pointer to HBA context object.
6774  * @type: The resource extent type.
6775  * @extnt_cnt: buffer to hold port extent count response
6776  * @extnt_size: buffer to hold port extent size response.
6777  *
6778  * This function calls the port to read the host allocated extents
6779  * for a particular type.
6780  **/
6781 int
6782 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6783                                uint16_t *extnt_cnt, uint16_t *extnt_size)
6784 {
6785         bool emb;
6786         int rc = 0;
6787         uint16_t curr_blks = 0;
6788         uint32_t req_len, emb_len;
6789         uint32_t alloc_len, mbox_tmo;
6790         struct list_head *blk_list_head;
6791         struct lpfc_rsrc_blks *rsrc_blk;
6792         LPFC_MBOXQ_t *mbox;
6793         void *virtaddr = NULL;
6794         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6795         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6796         union  lpfc_sli4_cfg_shdr *shdr;
6797
6798         switch (type) {
6799         case LPFC_RSC_TYPE_FCOE_VPI:
6800                 blk_list_head = &phba->lpfc_vpi_blk_list;
6801                 break;
6802         case LPFC_RSC_TYPE_FCOE_XRI:
6803                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6804                 break;
6805         case LPFC_RSC_TYPE_FCOE_VFI:
6806                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6807                 break;
6808         case LPFC_RSC_TYPE_FCOE_RPI:
6809                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6810                 break;
6811         default:
6812                 return -EIO;
6813         }
6814
6815         /* Count the number of extents currently allocated for this type. */
6816         list_for_each_entry(rsrc_blk, blk_list_head, list) {
6817                 if (curr_blks == 0) {
6818                         /*
6819                          * The GET_ALLOCATED mailbox does not return the size,
6820                          * just the count.  The size should be just the size
6821                          * stored in the current allocated block and all sizes
6822                          * for an extent type are the same so set the return
6823                          * value now.
6824                          */
6825                         *extnt_size = rsrc_blk->rsrc_size;
6826                 }
6827                 curr_blks++;
6828         }
6829
6830         /*
6831          * Calculate the size of an embedded mailbox.  The uint32_t
6832          * accounts for the extents-specific word.
6833          */
6834         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6835                 sizeof(uint32_t);
6836
6837         /*
6838          * Presume the allocation and response will fit into an embedded
6839          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6840          */
6841         emb = LPFC_SLI4_MBX_EMBED;
6842         req_len = curr_blks * sizeof(uint16_t);
6843         if (req_len > emb_len) {
6844                 req_len = curr_blks * sizeof(uint16_t) +
6845                         sizeof(union lpfc_sli4_cfg_shdr) +
6846                         sizeof(uint32_t);
6847                 emb = LPFC_SLI4_MBX_NEMBED;
6848         }
6849
6850         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6851         if (!mbox)
6852                 return -ENOMEM;
6853         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6854
6855         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6856                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6857                                      req_len, emb);
6858         if (alloc_len < req_len) {
6859                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6860                         "2983 Allocated DMA memory size (x%x) is "
6861                         "less than the requested DMA memory "
6862                         "size (x%x)\n", alloc_len, req_len);
6863                 rc = -ENOMEM;
6864                 goto err_exit;
6865         }
6866         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6867         if (unlikely(rc)) {
6868                 rc = -EIO;
6869                 goto err_exit;
6870         }
6871
6872         if (!phba->sli4_hba.intr_enable)
6873                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6874         else {
6875                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6876                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6877         }
6878
6879         if (unlikely(rc)) {
6880                 rc = -EIO;
6881                 goto err_exit;
6882         }
6883
6884         /*
6885          * Figure out where the response is located.  Then get local pointers
6886          * to the response data.  The port does not guarantee a response to
6887          * every extent count request, so update the local variable with the
6888          * allocated count from the port.
6889          */
6890         if (emb == LPFC_SLI4_MBX_EMBED) {
6891                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6892                 shdr = &rsrc_ext->header.cfg_shdr;
6893                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6894         } else {
6895                 virtaddr = mbox->sge_array->addr[0];
6896                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6897                 shdr = &n_rsrc->cfg_shdr;
6898                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6899         }
6900
6901         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6902                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6903                         "2984 Failed to read allocated resources "
6904                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6905                         type,
6906                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
6907                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6908                 rc = -EIO;
6909                 goto err_exit;
6910         }
6911  err_exit:
6912         lpfc_sli4_mbox_cmd_free(phba, mbox);
6913         return rc;
6914 }
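
/*
 * Editor's sketch (illustration only, not part of the driver): the
 * embedded/non-embedded choice above reduces to comparing the payload
 * the GET_ALLOCATED response needs (one uint16_t per extent currently
 * held) against the room left in an embedded mailbox:
 */
#if 0	/* never compiled */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		  sizeof(uint32_t);		/* room in an embedded mbox */
	req_len = curr_blks * sizeof(uint16_t);	/* ids the port returns */
	emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		/* Too large: use a non-embedded (SGE-based) mailbox and
		 * also account for the config header and extent word.
		 */
		req_len += sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}
#endif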
6915
6916 /**
6917  * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
6918  * @phba: pointer to lpfc hba data structure.
6920  * @sgl_list: linked list of sgl buffers to post
6921  * @cnt: number of linked list buffers
6922  *
6923  * This routine walks the list of buffers that have been allocated and
6924  * reposts them to the port by using SGL block post. This is needed after a
6925  * pci_function_reset/warm_start or start. It attempts to construct blocks
6926  * of buffer sgls that contain contiguous xris and uses the non-embedded
6927  * SGL block post mailbox commands to post them to the port. For a single
6928  * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
6929  * mailbox command for posting.
6930  *
6931  * Returns: 0 = success, non-zero failure.
6932  **/
6933 static int
6934 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6935                           struct list_head *sgl_list, int cnt)
6936 {
6937         struct lpfc_sglq *sglq_entry = NULL;
6938         struct lpfc_sglq *sglq_entry_next = NULL;
6939         struct lpfc_sglq *sglq_entry_first = NULL;
6940         int status, total_cnt;
6941         int post_cnt = 0, num_posted = 0, block_cnt = 0;
6942         int last_xritag = NO_XRI;
6943         LIST_HEAD(prep_sgl_list);
6944         LIST_HEAD(blck_sgl_list);
6945         LIST_HEAD(allc_sgl_list);
6946         LIST_HEAD(post_sgl_list);
6947         LIST_HEAD(free_sgl_list);
6948
6949         spin_lock_irq(&phba->hbalock);
6950         spin_lock(&phba->sli4_hba.sgl_list_lock);
6951         list_splice_init(sgl_list, &allc_sgl_list);
6952         spin_unlock(&phba->sli4_hba.sgl_list_lock);
6953         spin_unlock_irq(&phba->hbalock);
6954
6955         total_cnt = cnt;
6956         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6957                                  &allc_sgl_list, list) {
6958                 list_del_init(&sglq_entry->list);
6959                 block_cnt++;
6960                 if ((last_xritag != NO_XRI) &&
6961                     (sglq_entry->sli4_xritag != last_xritag + 1)) {
6962                         /* a hole in xri block, form a sgl posting block */
6963                         list_splice_init(&prep_sgl_list, &blck_sgl_list);
6964                         post_cnt = block_cnt - 1;
6965                         /* prepare list for next posting block */
6966                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
6967                         block_cnt = 1;
6968                 } else {
6969                         /* prepare list for next posting block */
6970                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
6971                         /* enough sgls for non-embed sgl mbox command */
6972                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6973                                 list_splice_init(&prep_sgl_list,
6974                                                  &blck_sgl_list);
6975                                 post_cnt = block_cnt;
6976                                 block_cnt = 0;
6977                         }
6978                 }
6979                 num_posted++;
6980
6981                 /* keep track of last sgl's xritag */
6982                 last_xritag = sglq_entry->sli4_xritag;
6983
6984                 /* end of repost sgl list condition for buffers */
6985                 if (num_posted == total_cnt) {
6986                         if (post_cnt == 0) {
6987                                 list_splice_init(&prep_sgl_list,
6988                                                  &blck_sgl_list);
6989                                 post_cnt = block_cnt;
6990                         } else if (block_cnt == 1) {
6991                                 status = lpfc_sli4_post_sgl(phba,
6992                                                 sglq_entry->phys, 0,
6993                                                 sglq_entry->sli4_xritag);
6994                                 if (!status) {
6995                                         /* successful, put sgl to posted list */
6996                                         list_add_tail(&sglq_entry->list,
6997                                                       &post_sgl_list);
6998                                 } else {
6999                                         /* Failure, put sgl to free list */
7000                                         lpfc_printf_log(phba, KERN_WARNING,
7001                                                 LOG_SLI,
7002                                                 "3159 Failed to post "
7003                                                 "sgl, xritag:x%x\n",
7004                                                 sglq_entry->sli4_xritag);
7005                                         list_add_tail(&sglq_entry->list,
7006                                                       &free_sgl_list);
7007                                         total_cnt--;
7008                                 }
7009                         }
7010                 }
7011
7012                 /* continue until a non-embedded page's worth of sgls is gathered */
7013                 if (post_cnt == 0)
7014                         continue;
7015
7016                 /* post the buffer list sgls as a block */
7017                 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7018                                                  post_cnt);
7019
7020                 if (!status) {
7021                         /* success, put sgl list to posted sgl list */
7022                         list_splice_init(&blck_sgl_list, &post_sgl_list);
7023                 } else {
7024                         /* Failure, put sgl list to free sgl list */
7025                         sglq_entry_first = list_first_entry(&blck_sgl_list,
7026                                                             struct lpfc_sglq,
7027                                                             list);
7028                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7029                                         "3160 Failed to post sgl-list, "
7030                                         "xritag:x%x-x%x\n",
7031                                         sglq_entry_first->sli4_xritag,
7032                                         (sglq_entry_first->sli4_xritag +
7033                                          post_cnt - 1));
7034                         list_splice_init(&blck_sgl_list, &free_sgl_list);
7035                         total_cnt -= post_cnt;
7036                 }
7037
7038                 /* don't reset xritag due to hole in xri block */
7039                 if (block_cnt == 0)
7040                         last_xritag = NO_XRI;
7041
7042                 /* reset sgl post count for next round of posting */
7043                 post_cnt = 0;
7044         }
7045
7046         /* free the sgls failed to post */
7047         lpfc_free_sgl_list(phba, &free_sgl_list);
7048
7049         /* push sgls posted to the available list */
7050         if (!list_empty(&post_sgl_list)) {
7051                 spin_lock_irq(&phba->hbalock);
7052                 spin_lock(&phba->sli4_hba.sgl_list_lock);
7053                 list_splice_init(&post_sgl_list, sgl_list);
7054                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7055                 spin_unlock_irq(&phba->hbalock);
7056         } else {
7057                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7058                                 "3161 Failure to post sgl to port.\n");
7059                 return -EIO;
7060         }
7061
7062         /* return the number of XRIs actually posted */
7063         return total_cnt;
7064 }
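
/*
 * Editor's sketch (illustration only, not part of the driver): the
 * block-forming loop above is a chunking rule over the xritag sequence:
 * cut a posting block whenever the tags stop being contiguous or a
 * block reaches the non-embedded mailbox limit.  The helper below is
 * hypothetical and only counts how many block posts would be issued
 * for a given tag sequence.
 */
#if 0	/* never compiled */
static int count_sgl_post_blocks(const uint16_t *tags, int n)
{
	int i, blocks = 0, len = 0, last = -1;

	for (i = 0; i < n; i++) {
		if ((last >= 0 && tags[i] != last + 1) ||
		    len == LPFC_NEMBED_MBOX_SGL_CNT) {
			blocks++;	/* hole or full page: close the block */
			len = 0;
		}
		len++;
		last = tags[i];
	}
	return len ? blocks + 1 : blocks;	/* final partial block */
}
#endif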
7065
7066 /**
7067  * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7068  * @phba: pointer to lpfc hba data structure.
7069  *
7070  * This routine walks the list of nvme buffers that have been allocated and
7071  * reposts them to the port by using SGL block post. This is needed after a
7072  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7073  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7074  * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7075  *
7076  * Returns: 0 = success, non-zero failure.
7077  **/
7078 static int
7079 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7080 {
7081         LIST_HEAD(post_nblist);
7082         int num_posted, rc = 0;
7083
7084         /* gather all NVME buffers that need reposting onto a local list */
7085         lpfc_io_buf_flush(phba, &post_nblist);
7086
7087         /* post the list of nvme buffer sgls to port if available */
7088         if (!list_empty(&post_nblist)) {
7089                 num_posted = lpfc_sli4_post_io_sgl_list(
7090                         phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7091                 /* failed to post any nvme buffer, return error */
7092                 if (num_posted == 0)
7093                         rc = -EIO;
7094         }
7095         return rc;
7096 }
7097
7098 static void
7099 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7100 {
7101         uint32_t len;
7102
7103         len = sizeof(struct lpfc_mbx_set_host_data) -
7104                 sizeof(struct lpfc_sli4_cfg_mhdr);
7105         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7106                          LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7107                          LPFC_SLI4_MBX_EMBED);
7108
7109         mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7110         mbox->u.mqe.un.set_host_data.param_len =
7111                                         LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7112         snprintf(mbox->u.mqe.un.set_host_data.data,
7113                  LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7114                  "Linux %s v"LPFC_DRIVER_VERSION,
7115                  (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7116 }
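
/*
 * Editor's note (illustration only): the SET_HOST_DATA payload built by
 * lpfc_set_host_data() above is just an identification string of the
 * form "Linux FC v<LPFC_DRIVER_VERSION>" ("Linux FCoE v..." when the
 * HBA runs in FCoE mode), truncated by snprintf() to
 * LPFC_HOST_OS_DRIVER_VERSION_SIZE bytes, so the port firmware can
 * record which host driver registered with it.
 */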
7117
7118 int
7119 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7120                     struct lpfc_queue *drq, int count, int idx)
7121 {
7122         int rc, i;
7123         struct lpfc_rqe hrqe;
7124         struct lpfc_rqe drqe;
7125         struct lpfc_rqb *rqbp;
7126         unsigned long flags;
7127         struct rqb_dmabuf *rqb_buffer;
7128         LIST_HEAD(rqb_buf_list);
7129
7130         spin_lock_irqsave(&phba->hbalock, flags);
7131         rqbp = hrq->rqbp;
7132         for (i = 0; i < count; i++) {
7133                 /* If the RQ is already full, don't bother */
7134                 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7135                         break;
7136                 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7137                 if (!rqb_buffer)
7138                         break;
7139                 rqb_buffer->hrq = hrq;
7140                 rqb_buffer->drq = drq;
7141                 rqb_buffer->idx = idx;
7142                 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7143         }
7144         while (!list_empty(&rqb_buf_list)) {
7145                 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7146                                  hbuf.list);
7147
7148                 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7149                 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7150                 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7151                 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7152                 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7153                 if (rc < 0) {
7154                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7155                                         "6421 Cannot post to HRQ %d: %x %x %x "
7156                                         "DRQ %x %x\n",
7157                                         hrq->queue_id,
7158                                         hrq->host_index,
7159                                         hrq->hba_index,
7160                                         hrq->entry_count,
7161                                         drq->host_index,
7162                                         drq->hba_index);
7163                         rqbp->rqb_free_buffer(phba, rqb_buffer);
7164                 } else {
7165                         list_add_tail(&rqb_buffer->hbuf.list,
7166                                       &rqbp->rqb_buffer_list);
7167                         rqbp->buffer_count++;
7168                 }
7169         }
7170         spin_unlock_irqrestore(&phba->hbalock, flags);
7171         return 1;
7172 }
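
/*
 * Editor's sketch (illustration only, not part of the driver): each
 * header/data RQE pair built above simply splits a 64-bit DMA address
 * into the low and high 32-bit words of the entry; putPaddrLow() and
 * putPaddrHigh() are assumed to be the usual cast/shift helpers:
 */
#if 0	/* never compiled */
	hrqe.address_lo = (uint32_t)(hbuf_phys & 0xffffffffULL);
	hrqe.address_hi = (uint32_t)(hbuf_phys >> 32);
	drqe.address_lo = (uint32_t)(dbuf_phys & 0xffffffffULL);
	drqe.address_hi = (uint32_t)(dbuf_phys >> 32);
#endif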
7173
7174 /**
7175  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7176  * @phba: Pointer to HBA context object.
7177  *
7178  * This function is the main SLI4 device initialization PCI function. This
7179  * function is called by the HBA initialization code, HBA reset code and
7180  * HBA error attention handler code. Caller is not required to hold any
7181  * locks.
7182  **/
7183 int
7184 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7185 {
7186         int rc, i, cnt, len;
7187         LPFC_MBOXQ_t *mboxq;
7188         struct lpfc_mqe *mqe;
7189         uint8_t *vpd;
7190         uint32_t vpd_size;
7191         uint32_t ftr_rsp = 0;
7192         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7193         struct lpfc_vport *vport = phba->pport;
7194         struct lpfc_dmabuf *mp;
7195         struct lpfc_rqb *rqbp;
7196
7197         /* Perform a PCI function reset to start from clean */
7198         rc = lpfc_pci_function_reset(phba);
7199         if (unlikely(rc))
7200                 return -ENODEV;
7201
7202         /* Check the HBA Host Status Register for readiness */
7203         rc = lpfc_sli4_post_status_check(phba);
7204         if (unlikely(rc))
7205                 return -ENODEV;
7206         else {
7207                 spin_lock_irq(&phba->hbalock);
7208                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7209                 spin_unlock_irq(&phba->hbalock);
7210         }
7211
7212         /*
7213          * Allocate a single mailbox container for initializing the
7214          * port.
7215          */
7216         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7217         if (!mboxq)
7218                 return -ENOMEM;
7219
7220         /* Issue READ_REV to collect vpd and FW information. */
7221         vpd_size = SLI4_PAGE_SIZE;
7222         vpd = kzalloc(vpd_size, GFP_KERNEL);
7223         if (!vpd) {
7224                 rc = -ENOMEM;
7225                 goto out_free_mbox;
7226         }
7227
7228         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7229         if (unlikely(rc)) {
7230                 kfree(vpd);
7231                 goto out_free_mbox;
7232         }
7233
7234         mqe = &mboxq->u.mqe;
7235         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7236         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7237                 phba->hba_flag |= HBA_FCOE_MODE;
7238                 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7239         } else {
7240                 phba->hba_flag &= ~HBA_FCOE_MODE;
7241         }
7242
7243         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7244                 LPFC_DCBX_CEE_MODE)
7245                 phba->hba_flag |= HBA_FIP_SUPPORT;
7246         else
7247                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7248
7249         phba->hba_flag &= ~HBA_IOQ_FLUSH;
7250
7251         if (phba->sli_rev != LPFC_SLI_REV4) {
7252                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7253                         "0376 READ_REV Error. SLI Level %d "
7254                         "FCoE enabled %d\n",
7255                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7256                 rc = -EIO;
7257                 kfree(vpd);
7258                 goto out_free_mbox;
7259         }
7260
7261         /*
7262          * Continue initialization with default values even if driver failed
7263          * to read FCoE param config regions, only read parameters if the
7264          * board is FCoE
7265          */
7266         if (phba->hba_flag & HBA_FCOE_MODE &&
7267             lpfc_sli4_read_fcoe_params(phba))
7268                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7269                         "2570 Failed to read FCoE parameters\n");
7270
7271         /*
7272          * Retrieve the sli4 device physical port name; failure to do so
7273          * is considered non-fatal.
7274          */
7275         rc = lpfc_sli4_retrieve_pport_name(phba);
7276         if (!rc)
7277                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7278                                 "3080 Successfully retrieved SLI4 device "
7279                                 "physical port name: %s.\n", phba->Port);
7280
7281         rc = lpfc_sli4_get_ctl_attr(phba);
7282         if (!rc)
7283                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7284                                 "8351 Successfully retrieved SLI4 device "
7285                                 "CTL ATTR\n");
7286
7287         /*
7288          * Evaluate the read rev and vpd data. Populate the driver
7289          * state with the results. If this routine fails, the failure
7290          * is not fatal as the driver will use generic values.
7291          */
7292         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7293         if (unlikely(!rc)) {
7294                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7295                                 "0377 Error %d parsing vpd. "
7296                                 "Using defaults.\n", rc);
7297                 rc = 0;
7298         }
7299         kfree(vpd);
7300
7301         /* Save information as VPD data */
7302         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7303         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7304
7305         /*
7306          * This is because first G7 ASIC doesn't support the standard
7307          * 0x5a NVME cmd descriptor type/subtype
7308          */
7309         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7310                         LPFC_SLI_INTF_IF_TYPE_6) &&
7311             (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7312             (phba->vpd.rev.smRev == 0) &&
7313             (phba->cfg_nvme_embed_cmd == 1))
7314                 phba->cfg_nvme_embed_cmd = 0;
7315
7316         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7317         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7318                                          &mqe->un.read_rev);
7319         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7320                                        &mqe->un.read_rev);
7321         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7322                                             &mqe->un.read_rev);
7323         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7324                                            &mqe->un.read_rev);
7325         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7326         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7327         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7328         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7329         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7330         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7331         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7332                         "(%d):0380 READ_REV Status x%x "
7333                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7334                         mboxq->vport ? mboxq->vport->vpi : 0,
7335                         bf_get(lpfc_mqe_status, mqe),
7336                         phba->vpd.rev.opFwName,
7337                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7338                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7339
7340         /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
7341         rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7342         if (phba->pport->cfg_lun_queue_depth > rc) {
7343                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7344                                 "3362 LUN queue depth changed from %d to %d\n",
7345                                 phba->pport->cfg_lun_queue_depth, rc);
7346                 phba->pport->cfg_lun_queue_depth = rc;
7347         }
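        /*
         * Editor's note (illustrative numbers): with max_xri = 4096 the
         * cap above is 4096 >> 3 = 512, so a configured LUN queue depth
         * of, say, 1024 would be clamped to 512 here.
         */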
7348
7349         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7350             LPFC_SLI_INTF_IF_TYPE_0) {
7351                 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7352                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7353                 if (rc == MBX_SUCCESS) {
7354                         phba->hba_flag |= HBA_RECOVERABLE_UE;
7355                         /* Set 1Sec interval to detect UE */
7356                         phba->eratt_poll_interval = 1;
7357                         phba->sli4_hba.ue_to_sr = bf_get(
7358                                         lpfc_mbx_set_feature_UESR,
7359                                         &mboxq->u.mqe.un.set_feature);
7360                         phba->sli4_hba.ue_to_rp = bf_get(
7361                                         lpfc_mbx_set_feature_UERP,
7362                                         &mboxq->u.mqe.un.set_feature);
7363                 }
7364         }
7365
7366         if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7367                 /* Enable MDS Diagnostics only if the SLI Port supports it */
7368                 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7369                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7370                 if (rc != MBX_SUCCESS)
7371                         phba->mds_diags_support = 0;
7372         }
7373
7374         /*
7375          * Discover the port's supported feature set and match it against the
7376          * host's requests.
7377          */
7378         lpfc_request_features(phba, mboxq);
7379         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7380         if (unlikely(rc)) {
7381                 rc = -EIO;
7382                 goto out_free_mbox;
7383         }
7384
7385         /*
7386          * The port must support FCP initiator mode as this is the
7387          * only mode running in the host.
7388          */
7389         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7390                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7391                                 "0378 No support for fcpi mode.\n");
7392                 ftr_rsp++;
7393         }
7394
7395         /* Performance Hints are ONLY for FCoE */
7396         if (phba->hba_flag & HBA_FCOE_MODE) {
7397                 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7398                         phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7399                 else
7400                         phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7401         }
7402
7403         /*
7404          * If the port cannot support the host's requested features
7405          * then turn off the global config parameters to disable the
7406          * feature in the driver.  This is not a fatal error.
7407          */
7408         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7409                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7410                         phba->cfg_enable_bg = 0;
7411                         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7412                         ftr_rsp++;
7413                 }
7414         }
7415
7416         if (phba->max_vpi && phba->cfg_enable_npiv &&
7417             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7418                 ftr_rsp++;
7419
7420         if (ftr_rsp) {
7421                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7422                                 "0379 Feature Mismatch Data: x%08x %08x "
7423                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7424                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7425                                 phba->cfg_enable_npiv, phba->max_vpi);
7426                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7427                         phba->cfg_enable_bg = 0;
7428                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7429                         phba->cfg_enable_npiv = 0;
7430         }
7431
7432         /* These SLI3 features are assumed in SLI4 */
7433         spin_lock_irq(&phba->hbalock);
7434         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7435         spin_unlock_irq(&phba->hbalock);
7436
7437         /*
7438          * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
7439          * calls depend on these resources to complete port setup.
7440          */
7441         rc = lpfc_sli4_alloc_resource_identifiers(phba);
7442         if (rc) {
7443                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7444                                 "2920 Failed to alloc Resource IDs "
7445                                 "rc = x%x\n", rc);
7446                 goto out_free_mbox;
7447         }
7448
7449         lpfc_set_host_data(phba, mboxq);
7450
7451         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7452         if (rc) {
7453                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7454                                 "2134 Failed to set host os driver version %x\n",
7455                                 rc);
7456         }
7457
7458         /* Read the port's service parameters. */
7459         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7460         if (rc) {
7461                 phba->link_state = LPFC_HBA_ERROR;
7462                 rc = -ENOMEM;
7463                 goto out_free_mbox;
7464         }
7465
7466         mboxq->vport = vport;
7467         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7468         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7469         if (rc == MBX_SUCCESS) {
7470                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7471                 rc = 0;
7472         }
7473
7474         /*
7475          * This memory was allocated by the lpfc_read_sparam routine. Release
7476          * it to the mbuf pool.
7477          */
7478         lpfc_mbuf_free(phba, mp->virt, mp->phys);
7479         kfree(mp);
7480         mboxq->ctx_buf = NULL;
7481         if (unlikely(rc)) {
7482                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7483                                 "0382 READ_SPARAM command failed "
7484                                 "status %d, mbxStatus x%x\n",
7485                                 rc, bf_get(lpfc_mqe_status, mqe));
7486                 phba->link_state = LPFC_HBA_ERROR;
7487                 rc = -EIO;
7488                 goto out_free_mbox;
7489         }
7490
7491         lpfc_update_vport_wwn(vport);
7492
7493         /* Update the fc_host data structures with new wwn. */
7494         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7495         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7496
7497         /* Create all the SLI4 queues */
7498         rc = lpfc_sli4_queue_create(phba);
7499         if (rc) {
7500                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7501                                 "3089 Failed to allocate queues\n");
7502                 rc = -ENODEV;
7503                 goto out_free_mbox;
7504         }
7505         /* Set up all the queues to the device */
7506         rc = lpfc_sli4_queue_setup(phba);
7507         if (unlikely(rc)) {
7508                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7509                                 "0381 Error %d during queue setup.\n", rc);
7510                 goto out_stop_timers;
7511         }
7512         /* Initialize the driver internal SLI layer lists. */
7513         lpfc_sli4_setup(phba);
7514         lpfc_sli4_queue_init(phba);
7515
7516         /* update host els xri-sgl sizes and mappings */
7517         rc = lpfc_sli4_els_sgl_update(phba);
7518         if (unlikely(rc)) {
7519                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7520                                 "1400 Failed to update xri-sgl size and "
7521                                 "mapping: %d\n", rc);
7522                 goto out_destroy_queue;
7523         }
7524
7525         /* register the els sgl pool to the port */
7526         rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7527                                        phba->sli4_hba.els_xri_cnt);
7528         if (unlikely(rc < 0)) {
7529                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7530                                 "0582 Error %d during els sgl post "
7531                                 "operation\n", rc);
7532                 rc = -ENODEV;
7533                 goto out_destroy_queue;
7534         }
7535         phba->sli4_hba.els_xri_cnt = rc;
7536
7537         if (phba->nvmet_support) {
7538                 /* update host nvmet xri-sgl sizes and mappings */
7539                 rc = lpfc_sli4_nvmet_sgl_update(phba);
7540                 if (unlikely(rc)) {
7541                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7542                                         "6308 Failed to update nvmet-sgl size "
7543                                         "and mapping: %d\n", rc);
7544                         goto out_destroy_queue;
7545                 }
7546
7547                 /* register the nvmet sgl pool to the port */
7548                 rc = lpfc_sli4_repost_sgl_list(
7549                         phba,
7550                         &phba->sli4_hba.lpfc_nvmet_sgl_list,
7551                         phba->sli4_hba.nvmet_xri_cnt);
7552                 if (unlikely(rc < 0)) {
7553                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7554                                         "3117 Error %d during nvmet "
7555                                         "sgl post\n", rc);
7556                         rc = -ENODEV;
7557                         goto out_destroy_queue;
7558                 }
7559                 phba->sli4_hba.nvmet_xri_cnt = rc;
7560
7561                 /* We allocate an iocbq for every receive context SGL.
7562                  * The additional allocation is for abort and ls handling.
7563                  */
7564                 cnt = phba->sli4_hba.nvmet_xri_cnt +
7565                         phba->sli4_hba.max_cfg_param.max_xri;
7566         } else {
7567                 /* update host common xri-sgl sizes and mappings */
7568                 rc = lpfc_sli4_io_sgl_update(phba);
7569                 if (unlikely(rc)) {
7570                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7571                                         "6082 Failed to update nvme-sgl size "
7572                                         "and mapping: %d\n", rc);
7573                         goto out_destroy_queue;
7574                 }
7575
7576                 /* register the allocated common sgl pool to the port */
7577                 rc = lpfc_sli4_repost_io_sgl_list(phba);
7578                 if (unlikely(rc)) {
7579                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7580                                         "6116 Error %d during nvme sgl post "
7581                                         "operation\n", rc);
7582                         /* Some NVME buffers were moved to abort nvme list */
7583                         /* A pci function reset will repost them */
7584                         rc = -ENODEV;
7585                         goto out_destroy_queue;
7586                 }
7587                 /* Each lpfc_io_buf job structure has an iocbq element.
7588                  * This cnt provides for abort, els, ct and ls requests.
7589                  */
7590                 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7591         }
7592
7593         if (!phba->sli.iocbq_lookup) {
7594                 /* Initialize and populate the iocb list per host */
7595                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7596                                 "2821 initialize iocb list with %d entries\n",
7597                                 cnt);
7598                 rc = lpfc_init_iocb_list(phba, cnt);
7599                 if (rc) {
7600                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7601                                         "1413 Failed to init iocb list.\n");
7602                         goto out_destroy_queue;
7603                 }
7604         }
7605
7606         if (phba->nvmet_support)
7607                 lpfc_nvmet_create_targetport(phba);
7608
7609         if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7610                 /* Post initial buffers to all RQs created */
7611                 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7612                         rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7613                         INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7614                         rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7615                         rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7616                         rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7617                         rqbp->buffer_count = 0;
7618
7619                         lpfc_post_rq_buffer(
7620                                 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7621                                 phba->sli4_hba.nvmet_mrq_data[i],
7622                                 phba->cfg_nvmet_mrq_post, i);
7623                 }
7624         }
7625
7626         /* Post the rpi header region to the device. */
7627         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7628         if (unlikely(rc)) {
7629                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7630                                 "0393 Error %d during rpi post operation\n",
7631                                 rc);
7632                 rc = -ENODEV;
7633                 goto out_destroy_queue;
7634         }
7635         lpfc_sli4_node_prep(phba);
7636
7637         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7638                 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7639                         /*
7640                          * The FC Port needs to register FCFI (index 0)
7641                          */
7642                         lpfc_reg_fcfi(phba, mboxq);
7643                         mboxq->vport = phba->pport;
7644                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7645                         if (rc != MBX_SUCCESS)
7646                                 goto out_unset_queue;
7647                         rc = 0;
7648                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7649                                                 &mboxq->u.mqe.un.reg_fcfi);
7650                 } else {
7651                         /* We are in NVME Target mode with MRQ > 1 */
7652
7653                         /* First register the FCFI */
7654                         lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7655                         mboxq->vport = phba->pport;
7656                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7657                         if (rc != MBX_SUCCESS)
7658                                 goto out_unset_queue;
7659                         rc = 0;
7660                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7661                                                 &mboxq->u.mqe.un.reg_fcfi_mrq);
7662
7663                         /* Next register the MRQs */
7664                         lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7665                         mboxq->vport = phba->pport;
7666                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7667                         if (rc != MBX_SUCCESS)
7668                                 goto out_unset_queue;
7669                         rc = 0;
7670                 }
7671                 /* Check if the port is configured to be disabled */
7672                 lpfc_sli_read_link_ste(phba);
7673         }
7674
7675         /* Don't post new buffers if the repost already recovered
7676          * the nvme sgls.
7677          */
7678         if (phba->nvmet_support == 0) {
7679                 if (phba->sli4_hba.io_xri_cnt == 0) {
7680                         len = lpfc_new_io_buf(
7681                                               phba, phba->sli4_hba.io_xri_max);
7682                         if (len == 0) {
7683                                 rc = -ENOMEM;
7684                                 goto out_unset_queue;
7685                         }
7686
7687                         if (phba->cfg_xri_rebalancing)
7688                                 lpfc_create_multixri_pools(phba);
7689                 }
7690         } else {
7691                 phba->cfg_xri_rebalancing = 0;
7692         }
7693
7694         /* Allow asynchronous mailbox commands to go through */
7695         spin_lock_irq(&phba->hbalock);
7696         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7697         spin_unlock_irq(&phba->hbalock);
7698
7699         /* Post receive buffers to the device */
7700         lpfc_sli4_rb_setup(phba);
7701
7702         /* Reset HBA FCF states after HBA reset */
7703         phba->fcf.fcf_flag = 0;
7704         phba->fcf.current_rec.flag = 0;
7705
7706         /* Start the ELS watchdog timer */
7707         mod_timer(&vport->els_tmofunc,
7708                   jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7709
7710         /* Start heart beat timer */
7711         mod_timer(&phba->hb_tmofunc,
7712                   jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7713         phba->hb_outstanding = 0;
7714         phba->last_completion_time = jiffies;
7715
7716         /* start eq_delay heartbeat */
7717         if (phba->cfg_auto_imax)
7718                 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7719                                    msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7720
7721         /* Start error attention (ERATT) polling timer */
7722         mod_timer(&phba->eratt_poll,
7723                   jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7724
7725         /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7726         if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7727                 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7728                 if (!rc) {
7729                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7730                                         "2829 This device supports "
7731                                         "Advanced Error Reporting (AER)\n");
7732                         spin_lock_irq(&phba->hbalock);
7733                         phba->hba_flag |= HBA_AER_ENABLED;
7734                         spin_unlock_irq(&phba->hbalock);
7735                 } else {
7736                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7737                                         "2830 This device does not support "
7738                                         "Advanced Error Reporting (AER)\n");
7739                         phba->cfg_aer_support = 0;
7740                 }
7741                 rc = 0;
7742         }
7743
7744         /*
7745          * The port is ready, set the host's link state to LINK_DOWN
7746          * in preparation for link interrupts.
7747          */
7748         spin_lock_irq(&phba->hbalock);
7749         phba->link_state = LPFC_LINK_DOWN;
7750
7751         /* Check if physical ports are trunked */
7752         if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7753                 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7754         if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7755                 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7756         if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7757                 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7758         if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7759                 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7760         spin_unlock_irq(&phba->hbalock);
7761
7762         /* Arm the CQs and then EQs on device */
7763         lpfc_sli4_arm_cqeq_intr(phba);
7764
7765         /* Indicate device interrupt mode */
7766         phba->sli4_hba.intr_enable = 1;
7767
7768         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7769             (phba->hba_flag & LINK_DISABLED)) {
7770                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7771                                 "3103 Adapter Link is disabled.\n");
7772                 lpfc_down_link(phba, mboxq);
7773                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7774                 if (rc != MBX_SUCCESS) {
7775                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7776                                         "3104 Adapter failed to issue "
7777                                         "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7778                         goto out_io_buff_free;
7779                 }
7780         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7781                 /* don't perform init_link on SLI4 FC port loopback test */
7782                 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7783                         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7784                         if (rc)
7785                                 goto out_io_buff_free;
7786                 }
7787         }
7788         mempool_free(mboxq, phba->mbox_mem_pool);
7789         return rc;
7790 out_io_buff_free:
7791         /* Free allocated IO Buffers */
7792         lpfc_io_free(phba);
7793 out_unset_queue:
7794         /* Unset all the queues set up in this routine on error */
7795         lpfc_sli4_queue_unset(phba);
7796 out_destroy_queue:
7797         lpfc_free_iocb_list(phba);
7798         lpfc_sli4_queue_destroy(phba);
7799 out_stop_timers:
7800         lpfc_stop_hba_timers(phba);
7801 out_free_mbox:
7802         mempool_free(mboxq, phba->mbox_mem_pool);
7803         return rc;
7804 }
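/*
 * Editorial sketch, not part of the driver: the goto-based unwind ladder
 * used by the setup routine above.  Each failed step jumps to the label
 * that tears down only what was already set up, in reverse order, so one
 * exit path handles every partial failure.  All example_* names below
 * are hypothetical stand-ins.
 */
struct example_dev { int dummy; };
static int  example_alloc_queues(struct example_dev *d)    { return 0; }
static void example_free_queues(struct example_dev *d)     { }
static int  example_register_port(struct example_dev *d)   { return 0; }
static void example_unregister_port(struct example_dev *d) { }
static int  example_start_timers(struct example_dev *d)    { return 0; }

static int example_setup(struct example_dev *dev)
{
        int rc;

        rc = example_alloc_queues(dev);
        if (rc)
                goto out;                       /* nothing to undo yet */
        rc = example_register_port(dev);
        if (rc)
                goto out_free_queues;           /* undo the allocation */
        rc = example_start_timers(dev);
        if (rc)
                goto out_unregister_port;       /* undo both prior steps */
        return 0;

out_unregister_port:
        example_unregister_port(dev);
out_free_queues:
        example_free_queues(dev);
out:
        return rc;
}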
7805
7806 /**
7807  * lpfc_mbox_timeout - Timeout callback function for the mbox timer
7808  * @t: Pointer to the timer_list embedded in the hba structure.
7809  *
7810  * This is the callback function for the mailbox timer. The mailbox
7811  * timer is armed when a new mailbox command is issued and the timer
7812  * is deleted when the mailbox completes. The function is called by
7813  * the kernel timer code when a mailbox does not complete within the
7814  * expected time. This function wakes up the worker thread to
7815  * process the mailbox timeout and returns. All the processing is
7816  * done by the worker thread function lpfc_mbox_timeout_handler.
7817  **/
7818 void
7819 lpfc_mbox_timeout(struct timer_list *t)
7820 {
7821         struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
7822         unsigned long iflag;
7823         uint32_t tmo_posted;
7824
7825         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7826         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7827         if (!tmo_posted)
7828                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7829         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7830
7831         if (!tmo_posted)
7832                 lpfc_worker_wake_up(phba);
7833         return;
7834 }
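/*
 * Editorial sketch, not part of the driver: how a timer callback like
 * lpfc_mbox_timeout() is paired with its timer.  timer_setup() binds the
 * callback once; from_timer() in the callback recovers the structure
 * that embeds the timer; mod_timer() (re)arms it per command.  The
 * example_hba structure and helpers below are hypothetical.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_hba {
        struct timer_list mbox_tmo;
};

static void example_mbox_timeout(struct timer_list *t)
{
        /* Recover the enclosing structure from the timer pointer. */
        struct example_hba *hba = from_timer(hba, t, mbox_tmo);

        (void)hba;      /* wake a worker thread here, as the driver does */
}

static void example_arm_mbox_timer(struct example_hba *hba,
                                   unsigned int tmo_ms)
{
        /* One-time binding of timer and callback (normally at init). */
        timer_setup(&hba->mbox_tmo, example_mbox_timeout, 0);
        /* (Re)arm the timer each time a mailbox command is issued. */
        mod_timer(&hba->mbox_tmo, jiffies + msecs_to_jiffies(tmo_ms));
}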
7835
7836 /**
7837  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7838  *                                    are pending
7839  * @phba: Pointer to HBA context object.
7840  *
7841  * This function checks if any mailbox completions are present on the mailbox
7842  * completion queue.
7843  **/
7844 static bool
7845 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7846 {
7847
7848         uint32_t idx;
7849         struct lpfc_queue *mcq;
7850         struct lpfc_mcqe *mcqe;
7851         bool pending_completions = false;
7852         uint8_t qe_valid;
7853
7854         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7855                 return false;
7856
7857         /* Check for completions on mailbox completion queue */
7858
7859         mcq = phba->sli4_hba.mbx_cq;
7860         idx = mcq->hba_index;
7861         qe_valid = mcq->qe_valid;
7862         while (bf_get_le32(lpfc_cqe_valid,
7863                (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7864                 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
7865                 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7866                     (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7867                         pending_completions = true;
7868                         break;
7869                 }
7870                 idx = (idx + 1) % mcq->entry_count;
7871                 if (mcq->hba_index == idx)
7872                         break;
7873
7874                 /* if the index wrapped around, toggle the valid bit */
7875                 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7876                         qe_valid = (qe_valid) ? 0 : 1;
7877         }
7878         return pending_completions;
7879
7880 }
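/*
 * Editorial sketch, not part of the driver: the valid-bit ring scan
 * performed above, reduced to its core.  On SLI4, each completion queue
 * entry carries a phase ("valid") bit that flips every time the ring
 * wraps, so an entry is live only while its bit matches the expected
 * phase.  All names below are hypothetical.
 */
#include <linux/types.h>

struct example_ring {
        u32 head;               /* next index the driver will examine */
        u32 entry_count;
        u8  phase;              /* expected valid bit for this lap */
};

static bool example_ring_has_match(const struct example_ring *r,
                                   bool (*valid)(u32 idx, u8 phase),
                                   bool (*match)(u32 idx))
{
        u32 idx = r->head;
        u8 phase = r->phase;

        while (valid(idx, phase)) {
                if (match(idx))
                        return true;
                idx = (idx + 1) % r->entry_count;
                if (idx == r->head)
                        break;          /* scanned one full lap */
                if (!idx)
                        phase = !phase; /* wrapped: flip expected phase */
        }
        return false;
}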
7881
7882 /**
7883  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7884  *                                            that were missed.
7885  * @phba: Pointer to HBA context object.
7886  *
7887  * For sli4, it is possible to miss an interrupt. As such, mbox completions
7888  * may be missed, causing erroneous mailbox timeouts to occur. This function
7889  * checks to see if mbox completions are on the mailbox completion queue
7890  * and will process all the completions associated with the eq for the
7891  * mailbox completion queue.
7892  **/
7893 static bool
7894 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7895 {
7896         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7897         uint32_t eqidx;
7898         struct lpfc_queue *fpeq = NULL;
7899         struct lpfc_queue *eq;
7900         bool mbox_pending;
7901
7902         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7903                 return false;
7904
7905         /* Find the EQ associated with the mbox CQ */
7906         if (sli4_hba->hdwq) {
7907                 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
7908                         eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
7909                         if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
7910                                 fpeq = eq;
7911                                 break;
7912                         }
7913                 }
7914         }
7915         if (!fpeq)
7916                 return false;
7917
7918         /* Turn off interrupts from this EQ */
7919
7920         sli4_hba->sli4_eq_clr_intr(fpeq);
7921
7922         /* Check to see if a mbox completion is pending */
7923
7924         mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7925
7926         /*
7927          * If a mbox completion is pending, process all the events on EQ
7928          * associated with the mbox completion queue (this could include
7929          * mailbox commands, async events, els commands, receive queue data
7930          * and fcp commands)
7931          */
7932
7933         if (mbox_pending)
7934                 /* process and rearm the EQ */
7935                 lpfc_sli4_process_eq(phba, fpeq);
7936         else
7937                 /* Always clear and re-arm the EQ */
7938                 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7939
7940         return mbox_pending;
7941
7942 }
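/*
 * Editorial sketch, not part of the driver: the race-closing pattern
 * used above.  Interrupts from the event queue are masked before the
 * check, so a completion that raced with the check is either seen now
 * or re-signalled when the queue is re-armed.  All callbacks below are
 * hypothetical.
 */
#include <linux/types.h>

static bool example_recover_missed(void *eq,
                                   void (*mask_irq)(void *eq),
                                   bool (*work_pending)(void),
                                   void (*process)(void *eq),
                                   void (*rearm)(void *eq))
{
        bool pending;

        mask_irq(eq);                   /* stop new interrupts from this EQ */
        pending = work_pending();       /* look for missed completions */
        if (pending)
                process(eq);            /* drain the EQ (re-arms when done) */
        else
                rearm(eq);              /* nothing missed: just re-arm */
        return pending;
}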
7943
7944 /**
7945  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7946  * @phba: Pointer to HBA context object.
7947  *
7948  * This function is called from worker thread when a mailbox command times out.
7949  * The caller is not required to hold any locks. This function will reset the
7950  * HBA and recover all the pending commands.
7951  **/
7952 void
7953 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7954 {
7955         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7956         MAILBOX_t *mb = NULL;
7957
7958         struct lpfc_sli *psli = &phba->sli;
7959
7960         /* If the mailbox completed, process the completion and return */
7961         if (lpfc_sli4_process_missed_mbox_completions(phba))
7962                 return;
7963
7964         if (pmbox != NULL)
7965                 mb = &pmbox->u.mb;
7966         /* Check the pmbox pointer first.  There is a race condition
7967          * between the mbox timeout handler getting executed in the
7968          * worklist and the mailbox actually completing. When this
7969          * race condition occurs, the mbox_active will be NULL.
7970          */
7971         spin_lock_irq(&phba->hbalock);
7972         if (pmbox == NULL) {
7973                 lpfc_printf_log(phba, KERN_WARNING,
7974                                 LOG_MBOX | LOG_SLI,
7975                                 "0353 Active Mailbox cleared - mailbox timeout "
7976                                 "exiting\n");
7977                 spin_unlock_irq(&phba->hbalock);
7978                 return;
7979         }
7980
7981         /* Mbox cmd <mbxCommand> timeout */
7982         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7983                         "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
7984                         mb->mbxCommand,
7985                         phba->pport->port_state,
7986                         phba->sli.sli_flag,
7987                         phba->sli.mbox_active);
7988         spin_unlock_irq(&phba->hbalock);
7989
7990         /* Setting state unknown so lpfc_sli_abort_iocb_ring
7991          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7992          * it to fail all outstanding SCSI IO.
7993          */
7994         spin_lock_irq(&phba->pport->work_port_lock);
7995         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7996         spin_unlock_irq(&phba->pport->work_port_lock);
7997         spin_lock_irq(&phba->hbalock);
7998         phba->link_state = LPFC_LINK_UNKNOWN;
7999         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8000         spin_unlock_irq(&phba->hbalock);
8001
8002         lpfc_sli_abort_fcp_rings(phba);
8003
8004         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8005                         "0345 Resetting board due to mailbox timeout\n");
8006
8007         /* Reset the HBA device */
8008         lpfc_reset_hba(phba);
8009 }
8010
8011 /**
8012  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8013  * @phba: Pointer to HBA context object.
8014  * @pmbox: Pointer to mailbox object.
8015  * @flag: Flag indicating how the mailbox need to be processed.
8016  *
8017  * This function is called by discovery code and HBA management code
8018  * to submit a mailbox command to firmware with SLI-3 interface spec. This
8019  * function gets the hbalock to protect the data structures.
8020  * The mailbox command can be submitted in polling mode, in which case
8021  * this function will wait in a polling loop for the completion of the
8022  * mailbox.
8023  * If the mailbox is submitted in no_wait mode (not polling), the
8024  * function will submit the command and return immediately without waiting
8025  * for the mailbox completion. The no_wait mode is supported only when the
8026  * HBA is in SLI2/SLI3 mode - interrupts are enabled.
8027  * The SLI interface allows only one mailbox pending at a time. If the
8028  * mailbox is issued in polling mode and there is already a mailbox
8029  * pending, then the function will return an error. If the mailbox is issued
8030  * in NO_WAIT mode and there is a mailbox pending already, the function
8031  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8032  * The sli layer owns the mailbox object until the completion of the mailbox
8033  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
8034  * return codes the caller owns the mailbox command after the return of
8035  * the function.
8036  **/
8037 static int
8038 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8039                        uint32_t flag)
8040 {
8041         MAILBOX_t *mbx;
8042         struct lpfc_sli *psli = &phba->sli;
8043         uint32_t status, evtctr;
8044         uint32_t ha_copy, hc_copy;
8045         int i;
8046         unsigned long timeout;
8047         unsigned long drvr_flag = 0;
8048         uint32_t word0, ldata;
8049         void __iomem *to_slim;
8050         int processing_queue = 0;
8051
8052         spin_lock_irqsave(&phba->hbalock, drvr_flag);
8053         if (!pmbox) {
8054                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8055                 /* processing mbox queue from intr_handler */
8056                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8057                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8058                         return MBX_SUCCESS;
8059                 }
8060                 processing_queue = 1;
8061                 pmbox = lpfc_mbox_get(phba);
8062                 if (!pmbox) {
8063                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8064                         return MBX_SUCCESS;
8065                 }
8066         }
8067
8068         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8069                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8070                 if (!pmbox->vport) {
8071                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8072                         lpfc_printf_log(phba, KERN_ERR,
8073                                         LOG_MBOX | LOG_VPORT,
8074                                         "1806 Mbox x%x failed. No vport\n",
8075                                         pmbox->u.mb.mbxCommand);
8076                         dump_stack();
8077                         goto out_not_finished;
8078                 }
8079         }
8080
8081         /* If the PCI channel is in offline state, do not post mbox. */
8082         if (unlikely(pci_channel_offline(phba->pcidev))) {
8083                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8084                 goto out_not_finished;
8085         }
8086
8087         /* If HBA has a deferred error attention, fail the mailbox command. */
8088         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8089                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8090                 goto out_not_finished;
8091         }
8092
8093         psli = &phba->sli;
8094
8095         mbx = &pmbox->u.mb;
8096         status = MBX_SUCCESS;
8097
8098         if (phba->link_state == LPFC_HBA_ERROR) {
8099                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8100
8101                 /* Mbox command <mbxCommand> cannot issue */
8102                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8103                                 "(%d):0311 Mailbox command x%x cannot "
8104                                 "issue Data: x%x x%x\n",
8105                                 pmbox->vport ? pmbox->vport->vpi : 0,
8106                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8107                 goto out_not_finished;
8108         }
8109
8110         if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8111                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8112                         !(hc_copy & HC_MBINT_ENA)) {
8113                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8114                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8115                                 "(%d):2528 Mailbox command x%x cannot "
8116                                 "issue Data: x%x x%x\n",
8117                                 pmbox->vport ? pmbox->vport->vpi : 0,
8118                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8119                         goto out_not_finished;
8120                 }
8121         }
8122
8123         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8124                 /* Polling for a mbox command when another one is already active
8125                  * is not allowed in SLI. Also, the driver must have established
8126                  * SLI2 mode to queue and process multiple mbox commands.
8127                  */
8128
8129                 if (flag & MBX_POLL) {
8130                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8131
8132                         /* Mbox command <mbxCommand> cannot issue */
8133                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8134                                         "(%d):2529 Mailbox command x%x "
8135                                         "cannot issue Data: x%x x%x\n",
8136                                         pmbox->vport ? pmbox->vport->vpi : 0,
8137                                         pmbox->u.mb.mbxCommand,
8138                                         psli->sli_flag, flag);
8139                         goto out_not_finished;
8140                 }
8141
8142                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8143                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8144                         /* Mbox command <mbxCommand> cannot issue */
8145                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8146                                         "(%d):2530 Mailbox command x%x "
8147                                         "cannot issue Data: x%x x%x\n",
8148                                         pmbox->vport ? pmbox->vport->vpi : 0,
8149                                         pmbox->u.mb.mbxCommand,
8150                                         psli->sli_flag, flag);
8151                         goto out_not_finished;
8152                 }
8153
8154                 /* Another mailbox command is still being processed, queue this
8155                  * command to be processed later.
8156                  */
8157                 lpfc_mbox_put(phba, pmbox);
8158
8159                 /* Mbox cmd issue - BUSY */
8160                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8161                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
8162                                 "x%x x%x x%x x%x\n",
8163                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8164                                 mbx->mbxCommand,
8165                                 phba->pport ? phba->pport->port_state : 0xff,
8166                                 psli->sli_flag, flag);
8167
8168                 psli->slistat.mbox_busy++;
8169                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8170
8171                 if (pmbox->vport) {
8172                         lpfc_debugfs_disc_trc(pmbox->vport,
8173                                 LPFC_DISC_TRC_MBOX_VPORT,
8174                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
8175                                 (uint32_t)mbx->mbxCommand,
8176                                 mbx->un.varWords[0], mbx->un.varWords[1]);
8177                 }
8178                 else {
8179                         lpfc_debugfs_disc_trc(phba->pport,
8180                                 LPFC_DISC_TRC_MBOX,
8181                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
8182                                 (uint32_t)mbx->mbxCommand,
8183                                 mbx->un.varWords[0], mbx->un.varWords[1]);
8184                 }
8185
8186                 return MBX_BUSY;
8187         }
8188
8189         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8190
8191         /* If we are not polling, we MUST be in SLI2 mode */
8192         if (flag != MBX_POLL) {
8193                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8194                     (mbx->mbxCommand != MBX_KILL_BOARD)) {
8195                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8196                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8197                         /* Mbox command <mbxCommand> cannot issue */
8198                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8199                                         "(%d):2531 Mailbox command x%x "
8200                                         "cannot issue Data: x%x x%x\n",
8201                                         pmbox->vport ? pmbox->vport->vpi : 0,
8202                                         pmbox->u.mb.mbxCommand,
8203                                         psli->sli_flag, flag);
8204                         goto out_not_finished;
8205                 }
8206                 /* timeout active mbox command */
8207                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8208                                            1000);
8209                 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8210         }
8211
8212         /* Mailbox cmd <cmd> issue */
8213         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8214                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8215                         "x%x\n",
8216                         pmbox->vport ? pmbox->vport->vpi : 0,
8217                         mbx->mbxCommand,
8218                         phba->pport ? phba->pport->port_state : 0xff,
8219                         psli->sli_flag, flag);
8220
8221         if (mbx->mbxCommand != MBX_HEARTBEAT) {
8222                 if (pmbox->vport) {
8223                         lpfc_debugfs_disc_trc(pmbox->vport,
8224                                 LPFC_DISC_TRC_MBOX_VPORT,
8225                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8226                                 (uint32_t)mbx->mbxCommand,
8227                                 mbx->un.varWords[0], mbx->un.varWords[1]);
8228                 }
8229                 else {
8230                         lpfc_debugfs_disc_trc(phba->pport,
8231                                 LPFC_DISC_TRC_MBOX,
8232                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
8233                                 (uint32_t)mbx->mbxCommand,
8234                                 mbx->un.varWords[0], mbx->un.varWords[1]);
8235                 }
8236         }
8237
8238         psli->slistat.mbox_cmd++;
8239         evtctr = psli->slistat.mbox_event;
8240
8241         /* next set own bit for the adapter and copy over command word */
8242         mbx->mbxOwner = OWN_CHIP;
8243
8244         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8245                 /* Populate mbox extension offset word. */
8246                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8247                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8248                                 = (uint8_t *)phba->mbox_ext
8249                                   - (uint8_t *)phba->mbox;
8250                 }
8251
8252                 /* Copy the mailbox extension data */
8253                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8254                         lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8255                                               (uint8_t *)phba->mbox_ext,
8256                                               pmbox->in_ext_byte_len);
8257                 }
8258                 /* Copy command data to host SLIM area */
8259                 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8260         } else {
8261                 /* Populate mbox extension offset word. */
8262                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8263                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8264                                 = MAILBOX_HBA_EXT_OFFSET;
8265
8266                 /* Copy the mailbox extension data */
8267                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8268                         lpfc_memcpy_to_slim(phba->MBslimaddr +
8269                                 MAILBOX_HBA_EXT_OFFSET,
8270                                 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8271
8272                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8273                         /* copy command data into host mbox for cmpl */
8274                         lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8275                                               MAILBOX_CMD_SIZE);
8276
8277                 /* First copy mbox command data to HBA SLIM, skip past first
8278                    word */
8279                 to_slim = phba->MBslimaddr + sizeof(uint32_t);
8280                 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8281                             MAILBOX_CMD_SIZE - sizeof(uint32_t));
8282
8283                 /* Next copy over first word, with mbxOwner set */
8284                 ldata = *((uint32_t *)mbx);
8285                 to_slim = phba->MBslimaddr;
8286                 writel(ldata, to_slim);
8287                 readl(to_slim); /* flush */
8288
8289                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8290                         /* switch over to host mailbox */
8291                         psli->sli_flag |= LPFC_SLI_ACTIVE;
8292         }
8293
8294         wmb();
8295
8296         switch (flag) {
8297         case MBX_NOWAIT:
8298                 /* Set up reference to mailbox command */
8299                 psli->mbox_active = pmbox;
8300                 /* Interrupt board to do it */
8301                 writel(CA_MBATT, phba->CAregaddr);
8302                 readl(phba->CAregaddr); /* flush */
8303                 /* Don't wait for it to finish, just return */
8304                 break;
8305
8306         case MBX_POLL:
8307                 /* Set up null reference to mailbox command */
8308                 psli->mbox_active = NULL;
8309                 /* Interrupt board to do it */
8310                 writel(CA_MBATT, phba->CAregaddr);
8311                 readl(phba->CAregaddr); /* flush */
8312
8313                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8314                         /* First read mbox status word */
8315                         word0 = *((uint32_t *)phba->mbox);
8316                         word0 = le32_to_cpu(word0);
8317                 } else {
8318                         /* First read mbox status word */
8319                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
8320                                 spin_unlock_irqrestore(&phba->hbalock,
8321                                                        drvr_flag);
8322                                 goto out_not_finished;
8323                         }
8324                 }
8325
8326                 /* Read the HBA Host Attention Register */
8327                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8328                         spin_unlock_irqrestore(&phba->hbalock,
8329                                                        drvr_flag);
8330                         goto out_not_finished;
8331                 }
8332                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8333                                                         1000) + jiffies;
8334                 i = 0;
8335                 /* Wait for command to complete */
8336                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8337                        (!(ha_copy & HA_MBATT) &&
8338                         (phba->link_state > LPFC_WARM_START))) {
8339                         if (time_after(jiffies, timeout)) {
8340                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8341                                 spin_unlock_irqrestore(&phba->hbalock,
8342                                                        drvr_flag);
8343                                 goto out_not_finished;
8344                         }
8345
8346                         /* Check if we took a mbox interrupt while we were
8347                            polling */
8348                         if (((word0 & OWN_CHIP) != OWN_CHIP)
8349                             && (evtctr != psli->slistat.mbox_event))
8350                                 break;
8351
8352                         if (i++ > 10) {
8353                                 spin_unlock_irqrestore(&phba->hbalock,
8354                                                        drvr_flag);
8355                                 msleep(1);
8356                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8357                         }
8358
8359                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8360                                 /* First copy command data */
8361                                 word0 = *((uint32_t *)phba->mbox);
8362                                 word0 = le32_to_cpu(word0);
8363                                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8364                                         MAILBOX_t *slimmb;
8365                                         uint32_t slimword0;
8366                                         /* Check real SLIM for any errors */
8367                                         slimword0 = readl(phba->MBslimaddr);
8368                                         slimmb = (MAILBOX_t *)&slimword0;
8369                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8370                                             && slimmb->mbxStatus) {
8371                                                 psli->sli_flag &=
8372                                                     ~LPFC_SLI_ACTIVE;
8373                                                 word0 = slimword0;
8374                                         }
8375                                 }
8376                         } else {
8377                                 /* First copy command data */
8378                                 word0 = readl(phba->MBslimaddr);
8379                         }
8380                         /* Read the HBA Host Attention Register */
8381                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8382                                 spin_unlock_irqrestore(&phba->hbalock,
8383                                                        drvr_flag);
8384                                 goto out_not_finished;
8385                         }
8386                 }
8387
8388                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8389                         /* copy results back to user */
8390                         lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8391                                                 MAILBOX_CMD_SIZE);
8392                         /* Copy the mailbox extension data */
8393                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8394                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8395                                                       pmbox->ctx_buf,
8396                                                       pmbox->out_ext_byte_len);
8397                         }
8398                 } else {
8399                         /* First copy command data */
8400                         lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8401                                                 MAILBOX_CMD_SIZE);
8402                         /* Copy the mailbox extension data */
8403                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8404                                 lpfc_memcpy_from_slim(
8405                                         pmbox->ctx_buf,
8406                                         phba->MBslimaddr +
8407                                         MAILBOX_HBA_EXT_OFFSET,
8408                                         pmbox->out_ext_byte_len);
8409                         }
8410                 }
8411
8412                 writel(HA_MBATT, phba->HAregaddr);
8413                 readl(phba->HAregaddr); /* flush */
8414
8415                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8416                 status = mbx->mbxStatus;
8417         }
8418
8419         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8420         return status;
8421
8422 out_not_finished:
8423         if (processing_queue) {
8424                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8425                 lpfc_mbox_cmpl_put(phba, pmbox);
8426         }
8427         return MBX_NOT_FINISHED;
8428 }
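/*
 * Editorial sketch, not part of the driver: the typical polled-caller
 * pattern around lpfc_sli_issue_mbox(), mirroring the ownership rules
 * documented above.  The caller builds the command into a mailbox
 * object from the driver mempool, issues it with MBX_POLL, and keeps
 * ownership, freeing the object afterwards.  example_build_cmd is a
 * hypothetical builder; the other identifiers appear in this file.
 */
static void example_build_cmd(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq);

static int example_issue_polled(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        int rc;

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;

        example_build_cmd(phba, mboxq);         /* fill in u.mb fields */
        mboxq->vport = phba->pport;

        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        rc = (rc == MBX_SUCCESS) ? 0 : -EIO;

        /* Polled caller still owns the mailbox object; release it. */
        mempool_free(mboxq, phba->mbox_mem_pool);
        return rc;
}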
8429
8430 /**
8431  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8432  * @phba: Pointer to HBA context object.
8433  *
8434  * The function blocks the posting of SLI4 asynchronous mailbox commands from
8435  * the driver internal pending mailbox queue. It will then try to wait out any
8436  * outstanding mailbox command before returning.
8437  *
8438  * Returns:
8439  *      0 - the outstanding mailbox command completed.
8440  *      1 - the wait for the outstanding mailbox command timed out.
8441  **/
8442 static int
8443 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8444 {
8445         struct lpfc_sli *psli = &phba->sli;
8446         int rc = 0;
8447         unsigned long timeout = 0;
8448
8449         /* Mark the asynchronous mailbox command posting as blocked */
8450         spin_lock_irq(&phba->hbalock);
8451         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8452         /* Determine how long we might wait for the active mailbox
8453          * command to be gracefully completed by firmware.
8454          */
8455         if (phba->sli.mbox_active)
8456                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8457                                                 phba->sli.mbox_active) *
8458                                                 1000) + jiffies;
8459         spin_unlock_irq(&phba->hbalock);
8460
8461         /* Make sure the mailbox is really active */
8462         if (timeout)
8463                 lpfc_sli4_process_missed_mbox_completions(phba);
8464
8465         /* Wait for the outstanding mailbox command to complete */
8466         while (phba->sli.mbox_active) {
8467                 /* Check active mailbox complete status every 2ms */
8468                 msleep(2);
8469                 if (time_after(jiffies, timeout)) {
8470                         /* Timeout, mark the outstanding cmd as not complete */
8471                         rc = 1;
8472                         break;
8473                 }
8474         }
8475
8476         /* Could not cleanly block async mailbox commands; undo the block */
8477         if (rc) {
8478                 spin_lock_irq(&phba->hbalock);
8479                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8480                 spin_unlock_irq(&phba->hbalock);
8481         }
8482         return rc;
8483 }
8484
8485 /**
8486  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
8487  * @phba: Pointer to HBA context object.
8488  *
8489  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8490  * commands from the driver internal pending mailbox queue. It makes sure
8491  * that there is no outstanding mailbox command before resuming posting
8492  * asynchronous mailbox commands. If, for any reason, there is an outstanding
8493  * mailbox command, it will try to wait it out before resuming asynchronous
8494  * mailbox command posting.
8495  **/
8496 static void
8497 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8498 {
8499         struct lpfc_sli *psli = &phba->sli;
8500
8501         spin_lock_irq(&phba->hbalock);
8502         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8503                 /* Asynchronous mailbox posting is not blocked, do nothing */
8504                 spin_unlock_irq(&phba->hbalock);
8505                 return;
8506         }
8507
8508         /* The outstanding synchronous mailbox command is guaranteed to
8509          * be done, whether it succeeded or timed out; after a timeout the
8510          * outstanding command is always removed. So just unblock async
8511          * mailbox command posting and resume.
8512          */
8513         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8514         spin_unlock_irq(&phba->hbalock);
8515
8516         /* wake up worker thread to post asynchronous mailbox command */
8517         lpfc_worker_wake_up(phba);
8518 }
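/*
 * Editorial sketch, not part of the driver: how the block/unblock pair
 * above brackets a synchronous operation.  If blocking succeeds, the
 * synchronous work runs with async mailbox posting quiesced, and posting
 * is resumed afterwards.  example_sync_work is a hypothetical stand-in
 * for the synchronous step.
 */
static int example_sync_work(struct lpfc_hba *phba);

static int example_sync_section(struct lpfc_hba *phba)
{
        int rc;

        rc = lpfc_sli4_async_mbox_block(phba);  /* quiesce async posting */
        if (rc)
                return -ETIMEDOUT;      /* active mbox never completed */

        rc = example_sync_work(phba);   /* hypothetical synchronous step */

        lpfc_sli4_async_mbox_unblock(phba);     /* resume async posting */
        return rc;
}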
8519
8520 /**
8521  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8522  * @phba: Pointer to HBA context object.
8523  * @mboxq: Pointer to mailbox object.
8524  *
8525  * The function waits for the bootstrap mailbox register ready bit from
8526  * the port, up to the regular mailbox command timeout value.
8527  *
8528  *      0 - no timeout waiting for the bootstrap mailbox register ready.
8529  *      MBXERR_ERROR - the wait for the bootstrap mailbox register timed out.
8530  **/
8531 static int
8532 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8533 {
8534         uint32_t db_ready;
8535         unsigned long timeout;
8536         struct lpfc_register bmbx_reg;
8537
8538         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8539                                    * 1000) + jiffies;
8540
8541         do {
8542                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8543                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8544                 if (!db_ready)
8545                         mdelay(2);
8546
8547                 if (time_after(jiffies, timeout))
8548                         return MBXERR_ERROR;
8549         } while (!db_ready);
8550
8551         return 0;
8552 }
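/*
 * Editorial sketch, not part of the driver: the bounded register-poll
 * idiom used above, in generic form.  The loop re-reads a device
 * register until a ready bit appears or a jiffies deadline passes,
 * backing off briefly between reads.  The register address and ready
 * bit are caller-supplied and hypothetical here.
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int example_wait_ready(void __iomem *reg, u32 ready_bit,
                              unsigned int tmo_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);

        do {
                if (readl(reg) & ready_bit)
                        return 0;       /* device signalled ready */
                mdelay(2);              /* brief busy-wait back-off */
        } while (!time_after(jiffies, deadline));

        return -ETIMEDOUT;
}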
8553
8554 /**
8555  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8556  * @phba: Pointer to HBA context object.
8557  * @mboxq: Pointer to mailbox object.
8558  *
8559  * The function posts a mailbox to the port.  The mailbox is expected
8560  * to be completely filled in and ready for the port to operate on it.
8561  * This routine executes a synchronous completion operation on the
8562  * mailbox by polling for its completion.
8563  *
8564  * The caller must not be holding any locks when calling this routine.
8565  *
8566  * Returns:
8567  *      MBX_SUCCESS - mailbox posted successfully
8568  *      Any of the MBX error values.
8569  **/
8570 static int
8571 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8572 {
8573         int rc = MBX_SUCCESS;
8574         unsigned long iflag;
8575         uint32_t mcqe_status;
8576         uint32_t mbx_cmnd;
8577         struct lpfc_sli *psli = &phba->sli;
8578         struct lpfc_mqe *mb = &mboxq->u.mqe;
8579         struct lpfc_bmbx_create *mbox_rgn;
8580         struct dma_address *dma_address;
8581
8582         /*
8583          * Only one mailbox can be active to the bootstrap mailbox region
8584          * at a time and there is no queueing provided.
8585          */
8586         spin_lock_irqsave(&phba->hbalock, iflag);
8587         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8588                 spin_unlock_irqrestore(&phba->hbalock, iflag);
8589                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8590                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8591                                 "cannot issue Data: x%x x%x\n",
8592                                 mboxq->vport ? mboxq->vport->vpi : 0,
8593                                 mboxq->u.mb.mbxCommand,
8594                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8595                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8596                                 psli->sli_flag, MBX_POLL);
8597                 return MBXERR_ERROR;
8598         }
8599         /* The driver grabs the token and owns it until release */
8600         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8601         phba->sli.mbox_active = mboxq;
8602         spin_unlock_irqrestore(&phba->hbalock, iflag);
8603
8604         /* wait for the bootstrap mbox register to be ready */
8605         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8606         if (rc)
8607                 goto exit;
8608         /*
8609          * Initialize the bootstrap memory region to avoid stale data areas
8610          * in the mailbox post.  Then copy the caller's mailbox contents to
8611          * the bmbx mailbox region.
8612          */
8613         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8614         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8615         lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8616                                sizeof(struct lpfc_mqe));
8617
8618         /* Post the high mailbox dma address to the port and wait for ready. */
8619         dma_address = &phba->sli4_hba.bmbx.dma_address;
8620         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8621
8622         /* wait for the bootstrap mbox register hi-address write to complete */
8623         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8624         if (rc)
8625                 goto exit;
8626
8627         /* Post the low mailbox dma address to the port. */
8628         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8629
8630         /* wait for the bootstrap mbox register low-address write to complete */
8631         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8632         if (rc)
8633                 goto exit;
8634
8635         /*
8636          * Read the CQ to ensure the mailbox has completed.
8637          * If so, update the mailbox status so that the upper layers
8638          * can complete the request normally.
8639          */
8640         lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8641                                sizeof(struct lpfc_mqe));
8642         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8643         lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8644                                sizeof(struct lpfc_mcqe));
8645         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8646         /*
8647          * When the CQE status indicates a failure and the mailbox status
8648          * indicates success then copy the CQE status into the mailbox status
8649          * (and prefix it with x4000).
8650          */
8651         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8652                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8653                         bf_set(lpfc_mqe_status, mb,
8654                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
8655                 rc = MBXERR_ERROR;
8656         } else
8657                 lpfc_sli4_swap_str(phba, mboxq);
8658
8659         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8660                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8661                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8662                         " x%x x%x CQ: x%x x%x x%x x%x\n",
8663                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8664                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8665                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8666                         bf_get(lpfc_mqe_status, mb),
8667                         mb->un.mb_words[0], mb->un.mb_words[1],
8668                         mb->un.mb_words[2], mb->un.mb_words[3],
8669                         mb->un.mb_words[4], mb->un.mb_words[5],
8670                         mb->un.mb_words[6], mb->un.mb_words[7],
8671                         mb->un.mb_words[8], mb->un.mb_words[9],
8672                         mb->un.mb_words[10], mb->un.mb_words[11],
8673                         mb->un.mb_words[12], mboxq->mcqe.word0,
8674                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
8675                         mboxq->mcqe.trailer);
8676 exit:
8677         /* We are holding the token, no need for the lock when releasing */
8678         spin_lock_irqsave(&phba->hbalock, iflag);
8679         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8680         phba->sli.mbox_active = NULL;
8681         spin_unlock_irqrestore(&phba->hbalock, iflag);
8682         return rc;
8683 }
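/*
 * Editorial sketch, not part of the driver: the bootstrap mailbox
 * doorbell handshake performed above, reduced to its ordered steps.
 * Each doorbell write must be followed by a ready-bit wait before the
 * next step.  The parameters and wait_ready callback are hypothetical
 * stand-ins for the bmbx helpers in this file.
 */
#include <linux/io.h>

static int example_bmbx_handshake(void __iomem *bmbx_reg,
                                  u32 addr_hi, u32 addr_lo,
                                  int (*wait_ready)(void __iomem *reg))
{
        int rc;

        rc = wait_ready(bmbx_reg);      /* port idle before posting */
        if (rc)
                return rc;

        writel(addr_hi, bmbx_reg);      /* step 1: high DMA address */
        rc = wait_ready(bmbx_reg);
        if (rc)
                return rc;

        writel(addr_lo, bmbx_reg);      /* step 2: low DMA address */
        return wait_ready(bmbx_reg);    /* port done processing */
}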
8684
8685 /**
8686  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8687  * @phba: Pointer to HBA context object.
8688  * @mboxq: Pointer to mailbox object.
8689  * @flag: Flag indicating how the mailbox need to be processed.
8690  *
8691  * This function is called by discovery code and HBA management code to submit
8692  * a mailbox command to firmware with SLI-4 interface spec.
8693  *
8694  * For all return codes, the caller owns the mailbox command after the
8695  * return of the function.
8696  **/
8697 static int
8698 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8699                        uint32_t flag)
8700 {
8701         struct lpfc_sli *psli = &phba->sli;
8702         unsigned long iflags;
8703         int rc;
8704
8705         /* dump the issued mailbox command if dump is set up */
8706         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8707
8708         rc = lpfc_mbox_dev_check(phba);
8709         if (unlikely(rc)) {
8710                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8711                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8712                                 "cannot issue Data: x%x x%x\n",
8713                                 mboxq->vport ? mboxq->vport->vpi : 0,
8714                                 mboxq->u.mb.mbxCommand,
8715                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8716                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8717                                 psli->sli_flag, flag);
8718                 goto out_not_finished;
8719         }
8720
8721         /* Detect polling mode and jump to a handler */
8722         if (!phba->sli4_hba.intr_enable) {
8723                 if (flag == MBX_POLL)
8724                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8725                 else
8726                         rc = -EIO;
8727                 if (rc != MBX_SUCCESS)
8728                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8729                                         "(%d):2541 Mailbox command x%x "
8730                                         "(x%x/x%x) failure: "
8731                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
8732                                         "Data: x%x x%x\n",
8733                                         mboxq->vport ? mboxq->vport->vpi : 0,
8734                                         mboxq->u.mb.mbxCommand,
8735                                         lpfc_sli_config_mbox_subsys_get(phba,
8736                                                                         mboxq),
8737                                         lpfc_sli_config_mbox_opcode_get(phba,
8738                                                                         mboxq),
8739                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8740                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8741                                         bf_get(lpfc_mcqe_ext_status,
8742                                                &mboxq->mcqe),
8743                                         psli->sli_flag, flag);
8744                 return rc;
8745         } else if (flag == MBX_POLL) {
8746                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8747                                 "(%d):2542 Try to issue mailbox command "
8748                                 "x%x (x%x/x%x) synchronously ahead of async "
8749                                 "mailbox command queue: x%x x%x\n",
8750                                 mboxq->vport ? mboxq->vport->vpi : 0,
8751                                 mboxq->u.mb.mbxCommand,
8752                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8753                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8754                                 psli->sli_flag, flag);
8755                 /* Try to block the asynchronous mailbox posting */
8756                 rc = lpfc_sli4_async_mbox_block(phba);
8757                 if (!rc) {
8758                         /* Successfully blocked, now issue sync mbox cmd */
8759                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8760                         if (rc != MBX_SUCCESS)
8761                                 lpfc_printf_log(phba, KERN_WARNING,
8762                                         LOG_MBOX | LOG_SLI,
8763                                         "(%d):2597 Sync Mailbox command "
8764                                         "x%x (x%x/x%x) failure: "
8765                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
8766                                         "Data: x%x x%x\n",
8767                                         mboxq->vport ? mboxq->vport->vpi : 0,
8768                                         mboxq->u.mb.mbxCommand,
8769                                         lpfc_sli_config_mbox_subsys_get(phba,
8770                                                                         mboxq),
8771                                         lpfc_sli_config_mbox_opcode_get(phba,
8772                                                                         mboxq),
8773                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8774                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8775                                         bf_get(lpfc_mcqe_ext_status,
8776                                                &mboxq->mcqe),
8777                                         psli->sli_flag, flag);
8778                         /* Unblock the async mailbox posting afterward */
8779                         lpfc_sli4_async_mbox_unblock(phba);
8780                 }
8781                 return rc;
8782         }
8783
8784         /* Now, interrupt mode asynchronous mailbox command */
8785         rc = lpfc_mbox_cmd_check(phba, mboxq);
8786         if (rc) {
8787                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8788                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8789                                 "cannot issue Data: x%x x%x\n",
8790                                 mboxq->vport ? mboxq->vport->vpi : 0,
8791                                 mboxq->u.mb.mbxCommand,
8792                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8793                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8794                                 psli->sli_flag, flag);
8795                 goto out_not_finished;
8796         }
8797
8798         /* Put the mailbox command to the driver internal FIFO */
8799         psli->slistat.mbox_busy++;
8800         spin_lock_irqsave(&phba->hbalock, iflags);
8801         lpfc_mbox_put(phba, mboxq);
8802         spin_unlock_irqrestore(&phba->hbalock, iflags);
8803         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8804                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
8805                         "x%x (x%x/x%x) x%x x%x x%x\n",
8806                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8807                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8808                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8809                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8810                         phba->pport->port_state,
8811                         psli->sli_flag, MBX_NOWAIT);
8812         /* Wake up worker thread to post the mailbox command from the queue head */
8813         lpfc_worker_wake_up(phba);
8814
8815         return MBX_BUSY;
8816
8817 out_not_finished:
8818         return MBX_NOT_FINISHED;
8819 }
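/*
 * Editorial sketch, not part of the driver: the asynchronous-caller
 * pattern for the SLI4 path above.  With MBX_NOWAIT the call returns
 * MBX_BUSY once the command is queued; ownership passes to the SLI
 * layer, which invokes mbox_cmpl on completion.  The example_* names
 * are hypothetical.
 */
static void example_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        /* Inspect mboxq->u.mb.mbxStatus here, then release the object. */
        mempool_free(mboxq, phba->mbox_mem_pool);
}

static int example_issue_async(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        int rc;

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;

        example_build_cmd(phba, mboxq);         /* hypothetical builder */
        mboxq->vport = phba->pport;
        mboxq->mbox_cmpl = example_cmpl;        /* completion callback */

        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                /* Rejected: the caller still owns the object. */
                mempool_free(mboxq, phba->mbox_mem_pool);
                return -EIO;
        }
        return 0;       /* queued (MBX_BUSY): SLI layer now owns it */
}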
8820
8821 /**
8822  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8823  * @phba: Pointer to HBA context object.
8824  *
8825  * This function is called by the worker thread to send a mailbox command to
8826  * SLI4 HBA firmware.
8827  *
8828  **/
8829 int
8830 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8831 {
8832         struct lpfc_sli *psli = &phba->sli;
8833         LPFC_MBOXQ_t *mboxq;
8834         int rc = MBX_SUCCESS;
8835         unsigned long iflags;
8836         struct lpfc_mqe *mqe;
8837         uint32_t mbx_cmnd;
8838
8839         /* Check interrupt mode before posting the async mailbox command */
8840         if (unlikely(!phba->sli4_hba.intr_enable))
8841                 return MBX_NOT_FINISHED;
8842
8843         /* Check for mailbox command service token */
8844         spin_lock_irqsave(&phba->hbalock, iflags);
8845         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8846                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8847                 return MBX_NOT_FINISHED;
8848         }
8849         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8850                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8851                 return MBX_NOT_FINISHED;
8852         }
8853         if (unlikely(phba->sli.mbox_active)) {
8854                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8855                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8856                                 "0384 There is pending active mailbox cmd\n");
8857                 return MBX_NOT_FINISHED;
8858         }
8859         /* Take the mailbox command service token */
8860         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8861
8862         /* Get the next mailbox command from head of queue */
8863         mboxq = lpfc_mbox_get(phba);
8864
8865         /* If no mailbox command is waiting to be posted, we're done */
8866         if (!mboxq) {
8867                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8868                 spin_unlock_irqrestore(&phba->hbalock, iflags);
8869                 return MBX_SUCCESS;
8870         }
8871         phba->sli.mbox_active = mboxq;
8872         spin_unlock_irqrestore(&phba->hbalock, iflags);
8873
8874         /* Check device readiness for posting mailbox command */
8875         rc = lpfc_mbox_dev_check(phba);
8876         if (unlikely(rc))
8877                 /* Driver clean routine will clean up pending mailbox */
8878                 goto out_not_finished;
8879
8880         /* Prepare the mbox command to be posted */
8881         mqe = &mboxq->u.mqe;
8882         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8883
8884         /* Start timer for the mbox_tmo and log some mailbox post messages */
8885         mod_timer(&psli->mbox_tmo, (jiffies +
8886                   msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8887
8888         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8889                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8890                         "x%x x%x\n",
8891                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8892                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8893                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8894                         phba->pport->port_state, psli->sli_flag);
8895
8896         if (mbx_cmnd != MBX_HEARTBEAT) {
8897                 if (mboxq->vport) {
8898                         lpfc_debugfs_disc_trc(mboxq->vport,
8899                                 LPFC_DISC_TRC_MBOX_VPORT,
8900                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8901                                 mbx_cmnd, mqe->un.mb_words[0],
8902                                 mqe->un.mb_words[1]);
8903                 } else {
8904                         lpfc_debugfs_disc_trc(phba->pport,
8905                                 LPFC_DISC_TRC_MBOX,
8906                                 "MBOX Send: cmd:x%x mb:x%x x%x",
8907                                 mbx_cmnd, mqe->un.mb_words[0],
8908                                 mqe->un.mb_words[1]);
8909                 }
8910         }
8911         psli->slistat.mbox_cmd++;
8912
8913         /* Post the mailbox command to the port */
8914         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8915         if (rc != MBX_SUCCESS) {
8916                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8917                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8918                                 "cannot issue Data: x%x x%x\n",
8919                                 mboxq->vport ? mboxq->vport->vpi : 0,
8920                                 mboxq->u.mb.mbxCommand,
8921                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8922                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8923                                 psli->sli_flag, MBX_NOWAIT);
8924                 goto out_not_finished;
8925         }
8926
8927         return rc;
8928
8929 out_not_finished:
8930         spin_lock_irqsave(&phba->hbalock, iflags);
8931         if (phba->sli.mbox_active) {
8932                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8933                 __lpfc_mbox_cmpl_put(phba, mboxq);
8934                 /* Release the token */
8935                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8936                 phba->sli.mbox_active = NULL;
8937         }
8938         spin_unlock_irqrestore(&phba->hbalock, iflags);
8939
8940         return MBX_NOT_FINISHED;
8941 }
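
/*
 * Editorial sketch: only one posted command at a time holds the
 * LPFC_SLI_MBOX_ACTIVE token, so the worker thread simply re-calls the
 * routine above to drain the FIFO; the call site (assumed to live in
 * lpfc_work_done()) looks roughly like:
 *
 *	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
 *		lpfc_sli4_post_async_mbox(phba);
 *
 * MBX_SUCCESS means a command was posted or none was pending;
 * MBX_NOT_FINISHED means the post was blocked or failed.
 */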
8942
8943 /**
8944  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8945  * @phba: Pointer to HBA context object.
8946  * @pmbox: Pointer to mailbox object.
8947  * @flag: Flag indicating how the mailbox needs to be processed.
8948  *
8949  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine
8950  * through the API jump table function pointer in the lpfc_hba struct.
8951  *
8952  * Return codes are those of the underlying routine; the caller owns the
8953  * mailbox command after the function returns.
8954  **/
8955 int
8956 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8957 {
8958         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8959 }
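
/*
 * Editorial sketch (assuming the mempool and default completion helpers
 * used elsewhere in the driver): a typical asynchronous caller allocates
 * a mailbox, builds the command, and reclaims it on failure:
 *
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (mbox) {
 *		lpfc_heart_beat(phba, mbox);
 *		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
 *		    MBX_NOT_FINISHED)
 *			mempool_free(mbox, phba->mbox_mem_pool);
 *	}
 */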
8960
8961 /**
8962  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8963  * @phba: The hba struct for which this call is being executed.
8964  * @dev_grp: The HBA PCI-Device group number.
8965  *
8966  * This routine sets up the mbox interface API function jump table in @phba
8967  * struct.
8968  * Returns: 0 - success, -ENODEV - failure.
8969  **/
8970 int
8971 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8972 {
8973
8974         switch (dev_grp) {
8975         case LPFC_PCI_DEV_LP:
8976                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8977                 phba->lpfc_sli_handle_slow_ring_event =
8978                                 lpfc_sli_handle_slow_ring_event_s3;
8979                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8980                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8981                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8982                 break;
8983         case LPFC_PCI_DEV_OC:
8984                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8985                 phba->lpfc_sli_handle_slow_ring_event =
8986                                 lpfc_sli_handle_slow_ring_event_s4;
8987                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8988                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8989                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8990                 break;
8991         default:
8992                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8993                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
8994                                 dev_grp);
8995                 return -ENODEV;
8996                 break;
8997         }
8998         return 0;
8999 }
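
/*
 * Editorial sketch: once the table above is filled in at attach time,
 * callers dispatch without branching on SLI revision:
 *
 *	if (lpfc_mbox_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	...
 *	rc = phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
 */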
9000
9001 /**
9002  * __lpfc_sli_ringtx_put - Add an iocb to the txq
9003  * @phba: Pointer to HBA context object.
9004  * @pring: Pointer to driver SLI ring object.
9005  * @piocb: Pointer to address of newly added command iocb.
9006  *
9007  * This function is called with hbalock held to add a command
9008  * iocb to the txq when SLI layer cannot submit the command iocb
9009  * to the ring.
9010  **/
9011 void
9012 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9013                     struct lpfc_iocbq *piocb)
9014 {
9015         lockdep_assert_held(&phba->hbalock);
9016         /* Insert the caller's iocb in the txq tail for later processing. */
9017         list_add_tail(&piocb->list, &pring->txq);
9018 }
9019
9020 /**
9021  * lpfc_sli_next_iocb - Get the next iocb in the txq
9022  * @phba: Pointer to HBA context object.
9023  * @pring: Pointer to driver SLI ring object.
9024  * @piocb: Pointer to address of newly added command iocb.
9025  *
9026  * This function is called with hbalock held before a new
9027  * iocb is submitted to the firmware. This function checks the
9028  * txq so that iocbs already queued there are flushed to the
9029  * firmware before any new iocb is submitted.
9030  * If there are iocbs in the txq which need to be submitted
9031  * to firmware, lpfc_sli_next_iocb returns the first element
9032  * of the txq after dequeuing it from txq.
9033  * If there is no iocb in the txq then the function will return
9034  * *piocb and *piocb is set to NULL. Caller needs to check
9035  * *piocb to find if there are more commands in the txq.
9036  **/
9037 static struct lpfc_iocbq *
9038 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9039                    struct lpfc_iocbq **piocb)
9040 {
9041         struct lpfc_iocbq *nextiocb;
9042
9043         lockdep_assert_held(&phba->hbalock);
9044
9045         nextiocb = lpfc_sli_ringtx_get(phba, pring);
9046         if (!nextiocb) {
9047                 nextiocb = *piocb;
9048                 *piocb = NULL;
9049         }
9050
9051         return nextiocb;
9052 }
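
/*
 * Editorial sketch: callers drain the txq first and the caller's own iocb
 * last, in a single loop (see __lpfc_sli_issue_iocb_s3() below):
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *
 * A NULL piocb on exit means the caller's iocb was submitted; otherwise it
 * must be queued to the txq or reported back as busy.
 */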
9053
9054 /**
9055  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9056  * @phba: Pointer to HBA context object.
9057  * @ring_number: SLI ring number to issue iocb on.
9058  * @piocb: Pointer to command iocb.
9059  * @flag: Flag indicating if this command can be put into txq.
9060  *
9061  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9062  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9063  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9064  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9065  * this function allows only iocbs for posting buffers. This function finds
9066  * next available slot in the command ring and posts the command to the
9067  * available slot and writes the port attention register to request HBA start
9068  * processing new iocb. If there is no slot available in the ring and
9069  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9070  * the function returns IOCB_BUSY.
9071  *
9072  * This function is called with hbalock held. The function will return success
9073  * after it successfully submits the iocb to firmware or after adding to the
9074  * txq.
9075  **/
9076 static int
9077 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9078                     struct lpfc_iocbq *piocb, uint32_t flag)
9079 {
9080         struct lpfc_iocbq *nextiocb;
9081         IOCB_t *iocb;
9082         struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9083
9084         lockdep_assert_held(&phba->hbalock);
9085
9086         if (piocb->iocb_cmpl && (!piocb->vport) &&
9087            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9088            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9089                 lpfc_printf_log(phba, KERN_ERR,
9090                                 LOG_SLI | LOG_VPORT,
9091                                 "1807 IOCB x%x failed. No vport\n",
9092                                 piocb->iocb.ulpCommand);
9093                 dump_stack();
9094                 return IOCB_ERROR;
9095         }
9096
9097
9098         /* If the PCI channel is in offline state, do not post iocbs. */
9099         if (unlikely(pci_channel_offline(phba->pcidev)))
9100                 return IOCB_ERROR;
9101
9102         /* If HBA has a deferred error attention, fail the iocb. */
9103         if (unlikely(phba->hba_flag & DEFER_ERATT))
9104                 return IOCB_ERROR;
9105
9106         /*
9107          * We should never get an IOCB if we are in a < LINK_DOWN state
9108          */
9109         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9110                 return IOCB_ERROR;
9111
9112         /*
9113          * Check to see if we are blocking IOCB processing because of an
9114          * outstanding event.
9115          */
9116         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9117                 goto iocb_busy;
9118
9119         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9120                 /*
9121                  * Only CREATE_XRI, CLOSE_XRI, QUE_RING_BUF, and Menlo
9122                  * maintenance GEN_REQUEST64 can be issued if the link is not up.
9123                  */
9124                 switch (piocb->iocb.ulpCommand) {
9125                 case CMD_GEN_REQUEST64_CR:
9126                 case CMD_GEN_REQUEST64_CX:
9127                         if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9128                                 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9129                                         FC_RCTL_DD_UNSOL_CMD) ||
9130                                 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9131                                         MENLO_TRANSPORT_TYPE))
9132
9133                                 goto iocb_busy;
9134                         break;
9135                 case CMD_QUE_RING_BUF_CN:
9136                 case CMD_QUE_RING_BUF64_CN:
9137                         /*
9138                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9139                          * completion, iocb_cmpl MUST be 0.
9140                          */
9141                         if (piocb->iocb_cmpl)
9142                                 piocb->iocb_cmpl = NULL;
9143                         /* fall through */
9144                 case CMD_CREATE_XRI_CR:
9145                 case CMD_CLOSE_XRI_CN:
9146                 case CMD_CLOSE_XRI_CX:
9147                         break;
9148                 default:
9149                         goto iocb_busy;
9150                 }
9151
9152         /*
9153          * For FCP commands, we must be in a state where we can process link
9154          * attention events.
9155          */
9156         } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9157                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9158                 goto iocb_busy;
9159         }
9160
9161         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9162                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9163                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9164
9165         if (iocb)
9166                 lpfc_sli_update_ring(phba, pring);
9167         else
9168                 lpfc_sli_update_full_ring(phba, pring);
9169
9170         if (!piocb)
9171                 return IOCB_SUCCESS;
9172
9173         goto out_busy;
9174
9175  iocb_busy:
9176         pring->stats.iocb_cmd_delay++;
9177
9178  out_busy:
9179
9180         if (!(flag & SLI_IOCB_RET_IOCB)) {
9181                 __lpfc_sli_ringtx_put(phba, pring, piocb);
9182                 return IOCB_SUCCESS;
9183         }
9184
9185         return IOCB_BUSY;
9186 }
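
/*
 * Editorial sketch: with SLI_IOCB_RET_IOCB clear, a busy ring is hidden
 * from the caller because the iocb is parked on the txq instead:
 *
 *	__lpfc_sli_issue_iocb_s3(phba, ring, piocb, 0);
 *		returns IOCB_SUCCESS (submitted now or queued to txq)
 *	__lpfc_sli_issue_iocb_s3(phba, ring, piocb, SLI_IOCB_RET_IOCB);
 *		returns IOCB_BUSY (caller keeps ownership of piocb)
 */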
9187
9188 /**
9189  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9190  * @phba: Pointer to HBA context object.
9191  * @piocbq: Pointer to command iocb.
9192  * @sglq: Pointer to the scatter gather queue object.
9193  *
9194  * This routine converts the bpl or bde that is in the IOCB
9195  * to a sgl list for the sli4 hardware. The physical address
9196  * of the bpl/bde is converted back to a virtual address.
9197  * If the IOCB contains a BPL then the list of BDE's is
9198  * converted to sli4_sge's. If the IOCB contains a single
9199  * BDE then it is converted to a single sli4_sge.
9200  * The IOCB is still in cpu endianness so the contents of
9201  * the bpl can be used without byte swapping.
9202  *
9203  * Returns valid XRI = Success, NO_XRI = Failure.
9204 **/
9205 static uint16_t
9206 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9207                 struct lpfc_sglq *sglq)
9208 {
9209         uint16_t xritag = NO_XRI;
9210         struct ulp_bde64 *bpl = NULL;
9211         struct ulp_bde64 bde;
9212         struct sli4_sge *sgl  = NULL;
9213         struct lpfc_dmabuf *dmabuf;
9214         IOCB_t *icmd;
9215         int numBdes = 0;
9216         int i = 0;
9217         uint32_t offset = 0; /* accumulated offset in the sg request list */
9218         int inbound = 0; /* number of sg reply entries inbound from firmware */
9219
9220         if (!piocbq || !sglq)
9221                 return xritag;
9222
9223         sgl  = (struct sli4_sge *)sglq->sgl;
9224         icmd = &piocbq->iocb;
9225         if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9226                 return sglq->sli4_xritag;
9227         if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9228                 numBdes = icmd->un.genreq64.bdl.bdeSize /
9229                                 sizeof(struct ulp_bde64);
9230                 /* The addrHigh and addrLow fields within the IOCB
9231                  * have not been byteswapped yet so there is no
9232                  * need to swap them back.
9233                  */
9234                 if (piocbq->context3)
9235                         dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9236                 else
9237                         return xritag;
9238
9239                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
9240                 if (!bpl)
9241                         return xritag;
9242
9243                 for (i = 0; i < numBdes; i++) {
9244                         /* Should already be byte swapped. */
9245                         sgl->addr_hi = bpl->addrHigh;
9246                         sgl->addr_lo = bpl->addrLow;
9247
9248                         sgl->word2 = le32_to_cpu(sgl->word2);
9249                         if ((i+1) == numBdes)
9250                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
9251                         else
9252                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
9253                         /* swap the size field back to the cpu so we
9254                          * can assign it to the sgl.
9255                          */
9256                         bde.tus.w = le32_to_cpu(bpl->tus.w);
9257                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9258                         /* The offsets in the sgl need to be accumulated
9259                          * separately for the request and reply lists.
9260                          * The request is always first, the reply follows.
9261                          */
9262                         if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9263                                 /* add up the reply sg entries */
9264                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9265                                         inbound++;
9266                                 /* first inbound? reset the offset */
9267                                 if (inbound == 1)
9268                                         offset = 0;
9269                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9270                                 bf_set(lpfc_sli4_sge_type, sgl,
9271                                         LPFC_SGE_TYPE_DATA);
9272                                 offset += bde.tus.f.bdeSize;
9273                         }
9274                         sgl->word2 = cpu_to_le32(sgl->word2);
9275                         bpl++;
9276                         sgl++;
9277                 }
9278         } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9279                         /* The addrHigh and addrLow fields of the BDE have not
9280                          * been byteswapped yet so they need to be swapped
9281                          * before putting them in the sgl.
9282                          */
9283                         sgl->addr_hi =
9284                                 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9285                         sgl->addr_lo =
9286                                 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9287                         sgl->word2 = le32_to_cpu(sgl->word2);
9288                         bf_set(lpfc_sli4_sge_last, sgl, 1);
9289                         sgl->word2 = cpu_to_le32(sgl->word2);
9290                         sgl->sge_len =
9291                                 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9292         }
9293         return sglq->sli4_xritag;
9294 }
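
/*
 * Editorial sketch: each BPL entry above is mapped onto an SLI4 SGE
 * roughly as follows:
 *
 *	BDE field		SGE field
 *	addrHigh/addrLow  ->	addr_hi/addr_lo
 *	tus.f.bdeSize	  ->	sge_len
 *	last entry	  ->	lpfc_sli4_sge_last = 1
 *
 * with word2 round-tripped through le32_to_cpu()/cpu_to_le32() so the
 * bit-field helpers operate on CPU-endian data.
 */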
9295
9296 /**
9297  * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
9298  * @phba: Pointer to HBA context object.
9299  * @iocbq: Pointer to command iocb.
9300  * @wqe: Pointer to the work queue entry.
9301  *
9302  * This routine converts the iocb command to its Work Queue Entry
9303  * equivalent. The wqe pointer should not have any fields set when
9304  * this routine is called because it will memcpy over them.
9305  * This routine does not set the CQ_ID or the WQEC bits in the
9306  * wqe.
9307  *
9308  * Returns: 0 = Success, IOCB_ERROR = Failure.
9309  **/
9310 static int
9311 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9312                 union lpfc_wqe128 *wqe)
9313 {
9314         uint32_t xmit_len = 0, total_len = 0;
9315         uint8_t ct = 0;
9316         uint32_t fip;
9317         uint32_t abort_tag;
9318         uint8_t command_type = ELS_COMMAND_NON_FIP;
9319         uint8_t cmnd;
9320         uint16_t xritag;
9321         uint16_t abrt_iotag;
9322         struct lpfc_iocbq *abrtiocbq;
9323         struct ulp_bde64 *bpl = NULL;
9324         uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9325         int numBdes, i;
9326         struct ulp_bde64 bde;
9327         struct lpfc_nodelist *ndlp;
9328         uint32_t *pcmd;
9329         uint32_t if_type;
9330
9331         fip = phba->hba_flag & HBA_FIP_SUPPORT;
9332         /* The fcp commands will set command type */
9333         if (iocbq->iocb_flag &  LPFC_IO_FCP)
9334                 command_type = FCP_COMMAND;
9335         else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9336                 command_type = ELS_COMMAND_FIP;
9337         else
9338                 command_type = ELS_COMMAND_NON_FIP;
9339
9340         if (phba->fcp_embed_io)
9341                 memset(wqe, 0, sizeof(union lpfc_wqe128));
9342         /* Some of the fields are in the right position already */
9343         memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9344         /* The ct field has moved so reset */
9345         wqe->generic.wqe_com.word7 = 0;
9346         wqe->generic.wqe_com.word10 = 0;
9347
9348         abort_tag = (uint32_t) iocbq->iotag;
9349         xritag = iocbq->sli4_xritag;
9350         /* words0-2 bpl convert bde */
9351         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9352                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9353                                 sizeof(struct ulp_bde64);
9354                 bpl  = (struct ulp_bde64 *)
9355                         ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9356                 if (!bpl)
9357                         return IOCB_ERROR;
9358
9359                 /* Should already be byte swapped. */
9360                 wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
9361                 wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
9362                 /* swap the size field back to the cpu so we
9363                  * can assign it to the sgl.
9364                  */
9365                 wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
9366                 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9367                 total_len = 0;
9368                 for (i = 0; i < numBdes; i++) {
9369                         bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
9370                         total_len += bde.tus.f.bdeSize;
9371                 }
9372         } else
9373                 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9374
9375         iocbq->iocb.ulpIoTag = iocbq->iotag;
9376         cmnd = iocbq->iocb.ulpCommand;
9377
9378         switch (iocbq->iocb.ulpCommand) {
9379         case CMD_ELS_REQUEST64_CR:
9380                 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9381                         ndlp = iocbq->context_un.ndlp;
9382                 else
9383                         ndlp = (struct lpfc_nodelist *)iocbq->context1;
9384                 if (!iocbq->iocb.ulpLe) {
9385                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9386                                 "2007 Only Limited Edition cmd Format"
9387                                 " supported 0x%x\n",
9388                                 iocbq->iocb.ulpCommand);
9389                         return IOCB_ERROR;
9390                 }
9391
9392                 wqe->els_req.payload_len = xmit_len;
9393                 /* Els_request64 has a TMO */
9394                 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9395                         iocbq->iocb.ulpTimeout);
9396                 /* Need a VF for word 4, so set the vf bit */
9397                 bf_set(els_req64_vf, &wqe->els_req, 0);
9398                 /* And a VFID for word 12 */
9399                 bf_set(els_req64_vfid, &wqe->els_req, 0);
9400                 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9401                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9402                        iocbq->iocb.ulpContext);
9403                 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9404                 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9405                 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9406                 if (command_type == ELS_COMMAND_FIP)
9407                         els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9408                                         >> LPFC_FIP_ELS_ID_SHIFT);
9409                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9410                                         iocbq->context2)->virt);
9411                 if_type = bf_get(lpfc_sli_intf_if_type,
9412                                         &phba->sli4_hba.sli_intf);
9413                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9414                         if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9415                                 *pcmd == ELS_CMD_SCR ||
9416                                 *pcmd == ELS_CMD_RSCN_XMT ||
9417                                 *pcmd == ELS_CMD_FDISC ||
9418                                 *pcmd == ELS_CMD_LOGO ||
9419                                 *pcmd == ELS_CMD_PLOGI)) {
9420                                 bf_set(els_req64_sp, &wqe->els_req, 1);
9421                                 bf_set(els_req64_sid, &wqe->els_req,
9422                                         iocbq->vport->fc_myDID);
9423                                 if ((*pcmd == ELS_CMD_FLOGI) &&
9424                                         !(phba->fc_topology ==
9425                                                 LPFC_TOPOLOGY_LOOP))
9426                                         bf_set(els_req64_sid, &wqe->els_req, 0);
9427                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9428                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9429                                         phba->vpi_ids[iocbq->vport->vpi]);
9430                         } else if (pcmd && iocbq->context1) {
9431                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9432                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9433                                         phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9434                         }
9435                 }
9436                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9437                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9438                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9439                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9440                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9441                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9442                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9443                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9444                 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9445                 break;
9446         case CMD_XMIT_SEQUENCE64_CX:
9447                 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9448                        iocbq->iocb.un.ulpWord[3]);
9449                 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9450                        iocbq->iocb.unsli3.rcvsli3.ox_id);
9451                 /* The entire sequence is transmitted for this IOCB */
9452                 xmit_len = total_len;
9453                 cmnd = CMD_XMIT_SEQUENCE64_CR;
9454                 if (phba->link_flag & LS_LOOPBACK_MODE)
9455                         bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9456                 /* fall through */
9457         case CMD_XMIT_SEQUENCE64_CR:
9458                 /* word3 iocb=io_tag32 wqe=reserved */
9459                 wqe->xmit_sequence.rsvd3 = 0;
9460                 /* word4 relative_offset memcpy */
9461                 /* word5 r_ctl/df_ctl memcpy */
9462                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9463                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9464                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9465                        LPFC_WQE_IOD_WRITE);
9466                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9467                        LPFC_WQE_LENLOC_WORD12);
9468                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9469                 wqe->xmit_sequence.xmit_len = xmit_len;
9470                 command_type = OTHER_COMMAND;
9471                 break;
9472         case CMD_XMIT_BCAST64_CN:
9473                 /* word3 iocb=iotag32 wqe=seq_payload_len */
9474                 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9475                 /* word4 iocb=rsvd wqe=rsvd */
9476                 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9477                 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9478                 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9479                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9480                 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9481                 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9482                 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9483                        LPFC_WQE_LENLOC_WORD3);
9484                 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9485                 break;
9486         case CMD_FCP_IWRITE64_CR:
9487                 command_type = FCP_COMMAND_DATA_OUT;
9488                 /* word3 iocb=iotag wqe=payload_offset_len */
9489                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9490                 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9491                        xmit_len + sizeof(struct fcp_rsp));
9492                 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9493                        0);
9494                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9495                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9496                 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9497                        iocbq->iocb.ulpFCP2Rcvy);
9498                 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9499                 /* Always open the exchange */
9500                 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9501                 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9502                        LPFC_WQE_LENLOC_WORD4);
9503                 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9504                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9505                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9506                         bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9507                         bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9508                         if (iocbq->priority) {
9509                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9510                                        (iocbq->priority << 1));
9511                         } else {
9512                                 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9513                                        (phba->cfg_XLanePriority << 1));
9514                         }
9515                 }
9516                 /* Note, word 10 is already initialized to 0 */
9517
9518                 /* PBDE is set from lpfc_enable_pbde only, not from Perf hints */
9519                 if (phba->cfg_enable_pbde)
9520                         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9521                 else
9522                         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9523
9524                 if (phba->fcp_embed_io) {
9525                         struct lpfc_io_buf *lpfc_cmd;
9526                         struct sli4_sge *sgl;
9527                         struct fcp_cmnd *fcp_cmnd;
9528                         uint32_t *ptr;
9529
9530                         /* 128 byte wqe support here */
9531
9532                         lpfc_cmd = iocbq->context1;
9533                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9534                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9535
9536                         /* Word 0-2 - FCP_CMND */
9537                         wqe->generic.bde.tus.f.bdeFlags =
9538                                 BUFF_TYPE_BDE_IMMED;
9539                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9540                         wqe->generic.bde.addrHigh = 0;
9541                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
9542
9543                         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9544                         bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9545
9546                         /* Word 22-29  FCP CMND Payload */
9547                         ptr = &wqe->words[22];
9548                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9549                 }
9550                 break;
9551         case CMD_FCP_IREAD64_CR:
9552                 /* word3 iocb=iotag wqe=payload_offset_len */
9553                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9554                 bf_set(payload_offset_len, &wqe->fcp_iread,
9555                        xmit_len + sizeof(struct fcp_rsp));
9556                 bf_set(cmd_buff_len, &wqe->fcp_iread,
9557                        0);
9558                 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9559                 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9560                 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9561                        iocbq->iocb.ulpFCP2Rcvy);
9562                 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9563                 /* Always open the exchange */
9564                 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9565                 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9566                        LPFC_WQE_LENLOC_WORD4);
9567                 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9568                 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9569                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9570                         bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9571                         bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9572                         if (iocbq->priority) {
9573                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9574                                        (iocbq->priority << 1));
9575                         } else {
9576                                 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9577                                        (phba->cfg_XLanePriority << 1));
9578                         }
9579                 }
9580                 /* Note, word 10 is already initialized to 0 */
9581
9582                 /* PBDE is set from lpfc_enable_pbde only, not from Perf hints */
9583                 if (phba->cfg_enable_pbde)
9584                         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9585                 else
9586                         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9587
9588                 if (phba->fcp_embed_io) {
9589                         struct lpfc_io_buf *lpfc_cmd;
9590                         struct sli4_sge *sgl;
9591                         struct fcp_cmnd *fcp_cmnd;
9592                         uint32_t *ptr;
9593
9594                         /* 128 byte wqe support here */
9595
9596                         lpfc_cmd = iocbq->context1;
9597                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9598                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9599
9600                         /* Word 0-2 - FCP_CMND */
9601                         wqe->generic.bde.tus.f.bdeFlags =
9602                                 BUFF_TYPE_BDE_IMMED;
9603                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9604                         wqe->generic.bde.addrHigh = 0;
9605                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
9606
9607                         bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9608                         bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9609
9610                         /* Word 22-29  FCP CMND Payload */
9611                         ptr = &wqe->words[22];
9612                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9613                 }
9614                 break;
9615         case CMD_FCP_ICMND64_CR:
9616                 /* word3 iocb=iotag wqe=payload_offset_len */
9617                 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9618                 bf_set(payload_offset_len, &wqe->fcp_icmd,
9619                        xmit_len + sizeof(struct fcp_rsp));
9620                 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9621                        0);
9622                 /* word3 iocb=IO_TAG wqe=reserved */
9623                 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9624                 /* Always open the exchange */
9625                 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9626                 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9627                 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9628                 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9629                        LPFC_WQE_LENLOC_NONE);
9630                 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9631                        iocbq->iocb.ulpFCP2Rcvy);
9632                 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9633                         bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9634                         bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9635                         if (iocbq->priority) {
9636                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9637                                        (iocbq->priority << 1));
9638                         } else {
9639                                 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9640                                        (phba->cfg_XLanePriority << 1));
9641                         }
9642                 }
9643                 /* Note, word 10 is already initialized to 0 */
9644
9645                 if (phba->fcp_embed_io) {
9646                         struct lpfc_io_buf *lpfc_cmd;
9647                         struct sli4_sge *sgl;
9648                         struct fcp_cmnd *fcp_cmnd;
9649                         uint32_t *ptr;
9650
9651                         /* 128 byte wqe support here */
9652
9653                         lpfc_cmd = iocbq->context1;
9654                         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9655                         fcp_cmnd = lpfc_cmd->fcp_cmnd;
9656
9657                         /* Word 0-2 - FCP_CMND */
9658                         wqe->generic.bde.tus.f.bdeFlags =
9659                                 BUFF_TYPE_BDE_IMMED;
9660                         wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9661                         wqe->generic.bde.addrHigh = 0;
9662                         wqe->generic.bde.addrLow =  88;  /* Word 22 */
9663
9664                         bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9665                         bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9666
9667                         /* Word 22-29  FCP CMND Payload */
9668                         ptr = &wqe->words[22];
9669                         memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9670                 }
9671                 break;
9672         case CMD_GEN_REQUEST64_CR:
9673                 /* For this command calculate the xmit length of the
9674                  * request bde.
9675                  */
9676                 xmit_len = 0;
9677                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9678                         sizeof(struct ulp_bde64);
9679                 for (i = 0; i < numBdes; i++) {
9680                         bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9681                         if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9682                                 break;
9683                         xmit_len += bde.tus.f.bdeSize;
9684                 }
9685                 /* word3 iocb=IO_TAG wqe=request_payload_len */
9686                 wqe->gen_req.request_payload_len = xmit_len;
9687                 /* word4 iocb=parameter wqe=relative_offset memcpy */
9688                 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9689                 /* word6 context tag copied in memcpy */
9690                 if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
9691                         ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9692                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9693                                 "2015 Invalid CT %x command 0x%x\n",
9694                                 ct, iocbq->iocb.ulpCommand);
9695                         return IOCB_ERROR;
9696                 }
9697                 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9698                 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9699                 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9700                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9701                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9702                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9703                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9704                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9705                 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9706                 command_type = OTHER_COMMAND;
9707                 break;
9708         case CMD_XMIT_ELS_RSP64_CX:
9709                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9710                 /* words0-2 BDE memcpy */
9711                 /* word3 iocb=iotag32 wqe=response_payload_len */
9712                 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9713                 /* word4 */
9714                 wqe->xmit_els_rsp.word4 = 0;
9715                 /* word5 iocb=rsvd wge=did */
9716                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9717                          iocbq->iocb.un.xseq64.xmit_els_remoteID);
9718
9719                 if_type = bf_get(lpfc_sli_intf_if_type,
9720                                         &phba->sli4_hba.sli_intf);
9721                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9722                         if (iocbq->vport->fc_flag & FC_PT2PT) {
9723                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9724                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9725                                         iocbq->vport->fc_myDID);
9726                                 if (iocbq->vport->fc_myDID == Fabric_DID) {
9727                                         bf_set(wqe_els_did,
9728                                                 &wqe->xmit_els_rsp.wqe_dest, 0);
9729                                 }
9730                         }
9731                 }
9732                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9733                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9734                 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9735                 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9736                        iocbq->iocb.unsli3.rcvsli3.ox_id);
9737                 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9738                         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9739                                phba->vpi_ids[iocbq->vport->vpi]);
9740                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9741                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9742                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9743                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9744                        LPFC_WQE_LENLOC_WORD3);
9745                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9746                 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9747                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9748                 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9749                                         iocbq->context2)->virt);
9750                 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9751                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9752                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9753                                         iocbq->vport->fc_myDID);
9754                                 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9755                                 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9756                                         phba->vpi_ids[phba->pport->vpi]);
9757                 }
9758                 command_type = OTHER_COMMAND;
9759                 break;
9760         case CMD_CLOSE_XRI_CN:
9761         case CMD_ABORT_XRI_CN:
9762         case CMD_ABORT_XRI_CX:
9763                 /* words 0-2 memcpy should be 0 (reserved) */
9764                 /* port will send abts */
9765                 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9766                 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9767                         abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9768                         fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9769                 } else
9770                         fip = 0;
9771
9772                 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9773                         /*
9774                          * The link is down, or the command was ELS_FIP
9775                          * so the fw does not need to send abts
9776                          * on the wire.
9777                          */
9778                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9779                 else
9780                         bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9781                 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9782                 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9783                 wqe->abort_cmd.rsrvd5 = 0;
9784                 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9785                         ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9786                 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9787                 /*
9788                  * The abort handler will send us CMD_ABORT_XRI_CN or
9789                  * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9790                  */
9791                 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9792                 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9793                 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9794                        LPFC_WQE_LENLOC_NONE);
9795                 cmnd = CMD_ABORT_XRI_CX;
9796                 command_type = OTHER_COMMAND;
9797                 xritag = 0;
9798                 break;
9799         case CMD_XMIT_BLS_RSP64_CX:
9800                 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9801                 /* As BLS ABTS RSP WQE is very different from other WQEs,
9802                  * we re-construct this WQE here based on information in
9803                  * iocbq from scratch.
9804                  */
9805                 memset(wqe, 0, sizeof(*wqe));
9806                 /* OX_ID is the same regardless of who sent ABTS to CT exchange */
9807                 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9808                        bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9809                 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9810                     LPFC_ABTS_UNSOL_INT) {
9811                         /* ABTS sent by initiator to CT exchange, the
9812                          * RX_ID field will be filled with the newly
9813                          * allocated responder XRI.
9814                          */
9815                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9816                                iocbq->sli4_xritag);
9817                 } else {
9818                         /* ABTS sent by responder to CT exchange, the
9819                          * RX_ID field will be filled with the responder
9820                          * RX_ID from ABTS.
9821                          */
9822                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9823                                bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9824                 }
9825                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9826                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9827
9828                 /* Use CT=VPI */
9829                 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9830                         ndlp->nlp_DID);
9831                 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9832                         iocbq->iocb.ulpContext);
9833                 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9834                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9835                         phba->vpi_ids[phba->pport->vpi]);
9836                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9837                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9838                        LPFC_WQE_LENLOC_NONE);
9839                 /* Overwrite the pre-set command type with OTHER_COMMAND */
9840                 command_type = OTHER_COMMAND;
9841                 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9842                         bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9843                                bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9844                         bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9845                                bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9846                         bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9847                                bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9848                 }
9849
9850                 break;
9851         case CMD_SEND_FRAME:
9852                 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
9853                 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
9854                 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
9855                 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
9856                 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
9857                 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
9858                 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
9859                 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
9860                 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9861                 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9862                 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9863                 return 0;
9864         case CMD_XRI_ABORTED_CX:
9865         case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9866         case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9867         case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9868         case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9869         case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9870         default:
9871                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9872                                 "2014 Invalid command 0x%x\n",
9873                                 iocbq->iocb.ulpCommand);
9874                 return IOCB_ERROR;
9875                 break;
9876         }
9877
9878         if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9879                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9880         else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9881                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9882         else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9883                 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9884         iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9885                               LPFC_IO_DIF_INSERT);
9886         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9887         bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9888         wqe->generic.wqe_com.abort_tag = abort_tag;
9889         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9890         bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9891         bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9892         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9893         return 0;
9894 }
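
/*
 * Editorial sketch: the SLI4 issue path below uses the converter as a
 * pure translation step before posting to the work queue:
 *
 *	union lpfc_wqe128 wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *		return IOCB_ERROR;
 *	if (lpfc_sli4_wq_put(wq, &wqe))
 *		return IOCB_ERROR;
 */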
9895
9896 /**
9897  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9898  * @phba: Pointer to HBA context object.
9899  * @ring_number: SLI ring number to issue iocb on.
9900  * @piocb: Pointer to command iocb.
9901  * @flag: Flag indicating if this command can be put into txq.
9902  *
9903  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9904  * an iocb command to an HBA with SLI-4 interface spec.
9905  *
9906  * This function is called with hbalock held. The function will return success
9907  * after it successfully submits the iocb to firmware or after adding to the
9908  * txq.
9909  **/
9910 static int
9911 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9912                          struct lpfc_iocbq *piocb, uint32_t flag)
9913 {
9914         struct lpfc_sglq *sglq;
9915         union lpfc_wqe128 wqe;
9916         struct lpfc_queue *wq;
9917         struct lpfc_sli_ring *pring;
9918
9919         /* Get the WQ */
9920         if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9921             (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9922                 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
9923         } else {
9924                 wq = phba->sli4_hba.els_wq;
9925         }
9926
9927         /* Get corresponding ring */
9928         pring = wq->pring;
9929
9930         /*
9931          * The WQE can be either 64 or 128 bytes.
9932          */
9933
9934         lockdep_assert_held(&pring->ring_lock);
9935
9936         if (piocb->sli4_xritag == NO_XRI) {
9937                 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9938                     piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9939                         sglq = NULL;
9940                 else {
9941                         if (!list_empty(&pring->txq)) {
9942                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
9943                                         __lpfc_sli_ringtx_put(phba,
9944                                                 pring, piocb);
9945                                         return IOCB_SUCCESS;
9946                                 } else {
9947                                         return IOCB_BUSY;
9948                                 }
9949                         } else {
9950                                 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9951                                 if (!sglq) {
9952                                         if (!(flag & SLI_IOCB_RET_IOCB)) {
9953                                                 __lpfc_sli_ringtx_put(phba,
9954                                                                 pring,
9955                                                                 piocb);
9956                                                 return IOCB_SUCCESS;
9957                                         } else
9958                                                 return IOCB_BUSY;
9959                                 }
9960                         }
9961                 }
9962         } else if (piocb->iocb_flag & LPFC_IO_FCP)
9963                 /* These IOs already have an XRI and a mapped sgl. */
9964                 sglq = NULL;
9965         else {
9966                 /*
9967                  * This is a continuation of a command (CX), so this
9968                  * sglq is on the active list.
9969                  */
9970                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9971                 if (!sglq)
9972                         return IOCB_ERROR;
9973         }
9974
9975         if (sglq) {
9976                 piocb->sli4_lxritag = sglq->sli4_lxritag;
9977                 piocb->sli4_xritag = sglq->sli4_xritag;
9978                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9979                         return IOCB_ERROR;
9980         }
9981
9982         if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9983                 return IOCB_ERROR;
9984
9985         if (lpfc_sli4_wq_put(wq, &wqe))
9986                 return IOCB_ERROR;
9987         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9988
9989         return 0;
9990 }
9991
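/*
 * Illustrative sketch (not driver code): with pring->ring_lock held, a
 * caller can let this routine park an iocb on the txq when no ELS sglq is
 * available by passing flag == 0, in which case IOCB_SUCCESS may mean
 * "queued on the txq" rather than "posted to the WQ":
 *
 *	rc = __lpfc_sli_issue_iocb_s4(phba, LPFC_ELS_RING, piocb, 0);
 *
 * Passing SLI_IOCB_RET_IOCB instead surfaces the same shortage as
 * IOCB_BUSY so the caller can retry or fail the request itself.
 */
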
9992 /**
9993  * __lpfc_sli_issue_iocb - Wrapper function of lockless version for issuing iocb
9994  *
9995  * This routine calls the lockless IOCB-issue routine through the function
9996  * pointer stored in the lpfc_hba struct.
9997  *
9998  * Return codes:
9999  * IOCB_ERROR - Error
10000  * IOCB_SUCCESS - Success
10001  * IOCB_BUSY - Busy
10002  **/
10003 int
10004 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10005                 struct lpfc_iocbq *piocb, uint32_t flag)
10006 {
10007         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10008 }
10009
10010 /**
10011  * lpfc_sli_api_table_setup - Set up sli api function jump table
10012  * @phba: The hba struct for which this call is being executed.
10013  * @dev_grp: The HBA PCI-Device group number.
10014  *
10015  * This routine sets up the SLI interface API function jump table in @phba
10016  * struct.
10017  * Returns: 0 - success, -ENODEV - failure.
10018  **/
10019 int
10020 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10021 {
10022
10023         switch (dev_grp) {
10024         case LPFC_PCI_DEV_LP:
10025                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10026                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10027                 break;
10028         case LPFC_PCI_DEV_OC:
10029                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10030                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10031                 break;
10032         default:
10033                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10034                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
10035                                 dev_grp);
10036                 return -ENODEV;
10038         }
10039         phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10040         return 0;
10041 }
10042
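/*
 * Illustrative note: once the jump table is populated, generic code
 * dispatches through the per-device-group pointers, e.g. for an SLI-4
 * (LPFC_PCI_DEV_OC) adapter:
 *
 *	lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	...
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *		(resolves to __lpfc_sli_issue_iocb_s4)
 */
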
10043 /**
10044  * lpfc_sli4_calc_ring - Calculates which ring to use
10045  * @phba: Pointer to HBA context object.
10046  * @piocb: Pointer to command iocb.
10047  *
10048  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10049  * hba_wqidx; thus we need to calculate the corresponding ring.
10050  * Since ABORTS must go on the same WQ as the command they are
10051  * aborting, we use the command's hba_wqidx.
10052  */
10053 struct lpfc_sli_ring *
10054 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10055 {
10056         struct lpfc_io_buf *lpfc_cmd;
10057
10058         if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10059                 if (unlikely(!phba->sli4_hba.hdwq))
10060                         return NULL;
10061                 /*
10062                  * For an abort iocb, hba_wqidx should already be
10063                  * set up based on which work queue we used.
10064                  */
10065                 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10066                         lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10067                         piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10068                 }
10069                 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10070         } else {
10071                 if (unlikely(!phba->sli4_hba.els_wq))
10072                         return NULL;
10073                 piocb->hba_wqidx = 0;
10074                 return phba->sli4_hba.els_wq->pring;
10075         }
10076 }
10077
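/*
 * Hedged example: the abort path later in this file
 * (lpfc_sli_abort_iotag_issue) relies on this routine by copying the
 * original command's hba_wqidx and setting LPFC_USE_FCPWQIDX, so the ABTS
 * is routed to the same WQ's ring as the command being aborted:
 *
 *	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
 *	abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
 *	pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
 */
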
10078 /**
10079  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10080  * @phba: Pointer to HBA context object.
10081  * @ring_number: SLI ring number to issue iocb on.
10082  * @piocb: Pointer to command iocb.
10083  * @flag: Flag indicating if this command can be put into txq.
10084  *
10085  * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
10086  * function. It acquires the appropriate lock (the ring_lock for SLI4,
10087  * the hbalock for SLI3), calls __lpfc_sli_issue_iocb, and returns
10088  * whatever return code __lpfc_sli_issue_iocb produces. This wrapper is
10089  * used by functions which do not already hold the lock.
10090  **/
10091 int
10092 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10093                     struct lpfc_iocbq *piocb, uint32_t flag)
10094 {
10095         struct lpfc_sli_ring *pring;
10096         unsigned long iflags;
10097         int rc;
10098
10099         if (phba->sli_rev == LPFC_SLI_REV4) {
10100                 pring = lpfc_sli4_calc_ring(phba, piocb);
10101                 if (unlikely(pring == NULL))
10102                         return IOCB_ERROR;
10103
10104                 spin_lock_irqsave(&pring->ring_lock, iflags);
10105                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10106                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10107         } else {
10108                 /* For now, SLI2/3 will still use hbalock */
10109                 spin_lock_irqsave(&phba->hbalock, iflags);
10110                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10111                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10112         }
10113         return rc;
10114 }
10115
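/*
 * Hedged usage sketch: a caller holding no locks issues through this
 * wrapper and handles the three return codes ("elsiocb" is a hypothetical,
 * fully prepared lpfc_iocbq; preparation is elided):
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, elsiocb);
 */
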
10116 /**
10117  * lpfc_extra_ring_setup - Extra ring setup function
10118  * @phba: Pointer to HBA context object.
10119  *
10120  * This function is called while the driver attaches to the
10121  * HBA to set up the extra ring. The extra ring is used
10122  * only when the driver needs to support target mode
10123  * or IP-over-FC functionality.
10124  *
10125  * This function is called with no lock held. SLI3 only.
10126  **/
10127 static int
10128 lpfc_extra_ring_setup(struct lpfc_hba *phba)
10129 {
10130         struct lpfc_sli *psli;
10131         struct lpfc_sli_ring *pring;
10132
10133         psli = &phba->sli;
10134
10135         /* Adjust cmd/rsp ring iocb entries more evenly */
10136
10137         /* Take some away from the FCP ring */
10138         pring = &psli->sli3_ring[LPFC_FCP_RING];
10139         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10140         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10141         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10142         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10143
10144         /* and give them to the extra ring */
10145         pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10146
10147         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10148         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10149         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10150         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10151
10152         /* Setup default profile for this ring */
10153         pring->iotag_max = 4096;
10154         pring->num_mask = 1;
10155         pring->prt[0].profile = 0;      /* Mask 0 */
10156         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10157         pring->prt[0].type = phba->cfg_multi_ring_type;
10158         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10159         return 0;
10160 }
10161
10162 /**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10163  * @phba: Pointer to HBA context object.
10164  * @iocbq: Pointer to iocb object.
10165  *
10166  * The async_event handler calls this routine when it receives
10167  * an ASYNC_STATUS_CN event from the port.  The port generates
10168  * this event when an Abort Sequence request to an rport fails
10169  * twice in succession.  The abort could be originated by the
10170  * driver or by the port.  The ABTS could have been for an ELS
10171  * or FCP IO.  The port only generates this event when an ABTS
10172  * fails to complete after one retry.
10173  */
10174 static void
10175 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10176                           struct lpfc_iocbq *iocbq)
10177 {
10178         struct lpfc_nodelist *ndlp = NULL;
10179         uint16_t rpi = 0, vpi = 0;
10180         struct lpfc_vport *vport = NULL;
10181
10182         /* The rpi in the ulpContext is vport-sensitive. */
10183         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10184         rpi = iocbq->iocb.ulpContext;
10185
10186         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10187                         "3092 Port generated ABTS async event "
10188                         "on vpi %d rpi %d status 0x%x\n",
10189                         vpi, rpi, iocbq->iocb.ulpStatus);
10190
10191         vport = lpfc_find_vport_by_vpid(phba, vpi);
10192         if (!vport)
10193                 goto err_exit;
10194         ndlp = lpfc_findnode_rpi(vport, rpi);
10195         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10196                 goto err_exit;
10197
10198         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10199                 lpfc_sli_abts_recover_port(vport, ndlp);
10200         return;
10201
10202  err_exit:
10203         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10204                         "3095 Event Context not found, no "
10205                         "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10206                         vpi, rpi, iocbq->iocb.ulpStatus,
10207                         iocbq->iocb.ulpContext);
10208 }
10209
10210 /**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10211  * @phba: pointer to HBA context object.
10212  * @ndlp: nodelist pointer for the impacted rport.
10213  * @axri: pointer to the wcqe containing the failed exchange.
10214  *
10215  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10216  * port.  The port generates this event when an abort exchange request to an
10217  * rport fails twice in succession with no reply.  The abort could be originated
10218  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
10219  */
10220 void
10221 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10222                            struct lpfc_nodelist *ndlp,
10223                            struct sli4_wcqe_xri_aborted *axri)
10224 {
10225         struct lpfc_vport *vport;
10226         uint32_t ext_status = 0;
10227
10228         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10229                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10230                                 "3115 Node Context not found, driver "
10231                                 "ignoring abts err event\n");
10232                 return;
10233         }
10234
10235         vport = ndlp->vport;
10236         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10237                         "3116 Port generated FCP XRI ABORT event on "
10238                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10239                         ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10240                         bf_get(lpfc_wcqe_xa_xri, axri),
10241                         bf_get(lpfc_wcqe_xa_status, axri),
10242                         axri->parameter);
10243
10244         /*
10245          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
10246          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10247          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10248          */
10249         ext_status = axri->parameter & IOERR_PARAM_MASK;
10250         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10251             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10252                 lpfc_sli_abts_recover_port(vport, ndlp);
10253 }
10254
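/*
 * Worked example (illustrative): a WCQE whose status is
 * IOSTAT_LOCAL_REJECT and whose parameter masks (via IOERR_PARAM_MASK) to
 * either IOERR_SEQUENCE_TIMEOUT or 0 is treated as an ABTS protocol
 * failure and triggers lpfc_sli_abts_recover_port(); any other
 * status/parameter combination is only logged above.
 */
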
10255 /**
10256  * lpfc_sli_async_event_handler - ASYNC iocb handler function
10257  * @phba: Pointer to HBA context object.
10258  * @pring: Pointer to driver SLI ring object.
10259  * @iocbq: Pointer to iocb object.
10260  *
10261  * This function is called by the slow ring event handler
10262  * function when there is an ASYNC event iocb in the ring.
10263  * This function is called with no lock held.
10264  * Currently this function handles only temperature related
10265  * ASYNC events. The function decodes the temperature sensor
10266  * event message and posts events for the management applications.
10267  **/
10268 static void
10269 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
10270         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
10271 {
10272         IOCB_t *icmd;
10273         uint16_t evt_code;
10274         struct temp_event temp_event_data;
10275         struct Scsi_Host *shost;
10276         uint32_t *iocb_w;
10277
10278         icmd = &iocbq->iocb;
10279         evt_code = icmd->un.asyncstat.evt_code;
10280
10281         switch (evt_code) {
10282         case ASYNC_TEMP_WARN:
10283         case ASYNC_TEMP_SAFE:
10284                 temp_event_data.data = (uint32_t) icmd->ulpContext;
10285                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10286                 if (evt_code == ASYNC_TEMP_WARN) {
10287                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10288                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10289                                 "0347 Adapter is very hot, please take "
10290                                 "corrective action. temperature : %d Celsius\n",
10291                                 (uint32_t) icmd->ulpContext);
10292                 } else {
10293                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
10294                         lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10295                                 "0340 Adapter temperature is OK now. "
10296                                 "temperature : %d Celsius\n",
10297                                 (uint32_t) icmd->ulpContext);
10298                 }
10299
10300                 /* Send temperature change event to applications */
10301                 shost = lpfc_shost_from_vport(phba->pport);
10302                 fc_host_post_vendor_event(shost, fc_get_event_number(),
10303                         sizeof(temp_event_data), (char *) &temp_event_data,
10304                         LPFC_NL_VENDOR_ID);
10305                 break;
10306         case ASYNC_STATUS_CN:
10307                 lpfc_sli_abts_err_handler(phba, iocbq);
10308                 break;
10309         default:
10310                 iocb_w = (uint32_t *) icmd;
10311                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10312                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
10313                         " evt_code 0x%x\n"
10314                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
10315                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
10316                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
10317                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10318                         pring->ringno, icmd->un.asyncstat.evt_code,
10319                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10320                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10321                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10322                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10323
10324                 break;
10325         }
10326 }
10327
10328
10329 /**
10330  * lpfc_sli4_setup - SLI ring setup function
10331  * @phba: Pointer to HBA context object.
10332  *
10333  * lpfc_sli4_setup sets up rings of the SLI interface with
10334  * the number of iocbs per ring and iotags. This function is
10335  * called while the driver attaches to the HBA and before the
10336  * interrupts are enabled, so there is no need for locking.
10337  *
10338  * This function always returns 0.
10339  **/
10340 int
10341 lpfc_sli4_setup(struct lpfc_hba *phba)
10342 {
10343         struct lpfc_sli_ring *pring;
10344
10345         pring = phba->sli4_hba.els_wq->pring;
10346         pring->num_mask = LPFC_MAX_RING_MASK;
10347         pring->prt[0].profile = 0;      /* Mask 0 */
10348         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10349         pring->prt[0].type = FC_TYPE_ELS;
10350         pring->prt[0].lpfc_sli_rcv_unsol_event =
10351             lpfc_els_unsol_event;
10352         pring->prt[1].profile = 0;      /* Mask 1 */
10353         pring->prt[1].rctl = FC_RCTL_ELS_REP;
10354         pring->prt[1].type = FC_TYPE_ELS;
10355         pring->prt[1].lpfc_sli_rcv_unsol_event =
10356             lpfc_els_unsol_event;
10357         pring->prt[2].profile = 0;      /* Mask 2 */
10358         /* NameServer Inquiry */
10359         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10360         /* NameServer */
10361         pring->prt[2].type = FC_TYPE_CT;
10362         pring->prt[2].lpfc_sli_rcv_unsol_event =
10363             lpfc_ct_unsol_event;
10364         pring->prt[3].profile = 0;      /* Mask 3 */
10365         /* NameServer response */
10366         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10367         /* NameServer */
10368         pring->prt[3].type = FC_TYPE_CT;
10369         pring->prt[3].lpfc_sli_rcv_unsol_event =
10370             lpfc_ct_unsol_event;
10371         return 0;
10372 }
10373
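/*
 * Illustrative note: incoming unsolicited frames are matched against the
 * prt[] masks above on (rctl, type). For example, an inbound ELS request
 * (FC_RCTL_ELS_REQ / FC_TYPE_ELS) matches Mask 0 and is handed to
 * lpfc_els_unsol_event(), while a CT NameServer response
 * (FC_RCTL_DD_SOL_CTL / FC_TYPE_CT) matches Mask 3 and goes to
 * lpfc_ct_unsol_event().
 */
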
10374 /**
10375  * lpfc_sli_setup - SLI ring setup function
10376  * @phba: Pointer to HBA context object.
10377  *
10378  * lpfc_sli_setup sets up rings of the SLI interface with
10379  * the number of iocbs per ring and iotags. This function is
10380  * called while the driver attaches to the HBA and before the
10381  * interrupts are enabled, so there is no need for locking.
10382  *
10383  * This function always returns 0. SLI3 only.
10384  **/
10385 int
10386 lpfc_sli_setup(struct lpfc_hba *phba)
10387 {
10388         int i, totiocbsize = 0;
10389         struct lpfc_sli *psli = &phba->sli;
10390         struct lpfc_sli_ring *pring;
10391
10392         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10393         psli->sli_flag = 0;
10394
10395         psli->iocbq_lookup = NULL;
10396         psli->iocbq_lookup_len = 0;
10397         psli->last_iotag = 0;
10398
10399         for (i = 0; i < psli->num_rings; i++) {
10400                 pring = &psli->sli3_ring[i];
10401                 switch (i) {
10402                 case LPFC_FCP_RING:     /* ring 0 - FCP */
10403                         /* numCiocb and numRiocb are used in config_port */
10404                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10405                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10406                         pring->sli.sli3.numCiocb +=
10407                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10408                         pring->sli.sli3.numRiocb +=
10409                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10410                         pring->sli.sli3.numCiocb +=
10411                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10412                         pring->sli.sli3.numRiocb +=
10413                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10414                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10415                                                         SLI3_IOCB_CMD_SIZE :
10416                                                         SLI2_IOCB_CMD_SIZE;
10417                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10418                                                         SLI3_IOCB_RSP_SIZE :
10419                                                         SLI2_IOCB_RSP_SIZE;
10420                         pring->iotag_ctr = 0;
10421                         pring->iotag_max =
10422                             (phba->cfg_hba_queue_depth * 2);
10423                         pring->fast_iotag = pring->iotag_max;
10424                         pring->num_mask = 0;
10425                         break;
10426                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
10427                         /* numCiocb and numRiocb are used in config_port */
10428                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10429                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10430                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10431                                                         SLI3_IOCB_CMD_SIZE :
10432                                                         SLI2_IOCB_CMD_SIZE;
10433                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10434                                                         SLI3_IOCB_RSP_SIZE :
10435                                                         SLI2_IOCB_RSP_SIZE;
10436                         pring->iotag_max = phba->cfg_hba_queue_depth;
10437                         pring->num_mask = 0;
10438                         break;
10439                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
10440                         /* numCiocb and numRiocb are used in config_port */
10441                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10442                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10443                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10444                                                         SLI3_IOCB_CMD_SIZE :
10445                                                         SLI2_IOCB_CMD_SIZE;
10446                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10447                                                         SLI3_IOCB_RSP_SIZE :
10448                                                         SLI2_IOCB_RSP_SIZE;
10449                         pring->fast_iotag = 0;
10450                         pring->iotag_ctr = 0;
10451                         pring->iotag_max = 4096;
10452                         pring->lpfc_sli_rcv_async_status =
10453                                 lpfc_sli_async_event_handler;
10454                         pring->num_mask = LPFC_MAX_RING_MASK;
10455                         pring->prt[0].profile = 0;      /* Mask 0 */
10456                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10457                         pring->prt[0].type = FC_TYPE_ELS;
10458                         pring->prt[0].lpfc_sli_rcv_unsol_event =
10459                             lpfc_els_unsol_event;
10460                         pring->prt[1].profile = 0;      /* Mask 1 */
10461                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
10462                         pring->prt[1].type = FC_TYPE_ELS;
10463                         pring->prt[1].lpfc_sli_rcv_unsol_event =
10464                             lpfc_els_unsol_event;
10465                         pring->prt[2].profile = 0;      /* Mask 2 */
10466                         /* NameServer Inquiry */
10467                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10468                         /* NameServer */
10469                         pring->prt[2].type = FC_TYPE_CT;
10470                         pring->prt[2].lpfc_sli_rcv_unsol_event =
10471                             lpfc_ct_unsol_event;
10472                         pring->prt[3].profile = 0;      /* Mask 3 */
10473                         /* NameServer response */
10474                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10475                         /* NameServer */
10476                         pring->prt[3].type = FC_TYPE_CT;
10477                         pring->prt[3].lpfc_sli_rcv_unsol_event =
10478                             lpfc_ct_unsol_event;
10479                         break;
10480                 }
10481                 totiocbsize += (pring->sli.sli3.numCiocb *
10482                         pring->sli.sli3.sizeCiocb) +
10483                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10484         }
10485         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10486                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10487                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10488                        "SLI2 SLIM Data: x%x x%lx\n",
10489                        phba->brd_no, totiocbsize,
10490                        (unsigned long) MAX_SLIM_IOCB_SIZE);
10491         }
10492         if (phba->cfg_multi_ring_support == 2)
10493                 lpfc_extra_ring_setup(phba);
10494
10495         return 0;
10496 }
10497
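/*
 * Worked arithmetic (hypothetical values, for illustration only): if one
 * ring were configured with numCiocb = 128 at sizeCiocb = 128 bytes and
 * numRiocb = 128 at sizeRiocb = 64 bytes, it would contribute
 * 128 * 128 + 128 * 64 = 24576 bytes to totiocbsize; the sum over all
 * rings must fit within MAX_SLIM_IOCB_SIZE or the warning above is logged.
 */
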
10498 /**
10499  * lpfc_sli4_queue_init - Queue initialization function
10500  * @phba: Pointer to HBA context object.
10501  *
10502  * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10503  * ring. This function also initializes ring indices of each ring.
10504  * This function is called during the initialization of the SLI
10505  * interface of an HBA.
10506  * This function is called with no lock held and returns no value.
10508  **/
10509 void
10510 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10511 {
10512         struct lpfc_sli *psli;
10513         struct lpfc_sli_ring *pring;
10514         int i;
10515
10516         psli = &phba->sli;
10517         spin_lock_irq(&phba->hbalock);
10518         INIT_LIST_HEAD(&psli->mboxq);
10519         INIT_LIST_HEAD(&psli->mboxq_cmpl);
10520         /* Initialize list headers for txq and txcmplq as doubly linked lists */
10521         for (i = 0; i < phba->cfg_hdw_queue; i++) {
10522                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10523                 pring->flag = 0;
10524                 pring->ringno = LPFC_FCP_RING;
10525                 pring->txcmplq_cnt = 0;
10526                 INIT_LIST_HEAD(&pring->txq);
10527                 INIT_LIST_HEAD(&pring->txcmplq);
10528                 INIT_LIST_HEAD(&pring->iocb_continueq);
10529                 spin_lock_init(&pring->ring_lock);
10530         }
10531         pring = phba->sli4_hba.els_wq->pring;
10532         pring->flag = 0;
10533         pring->ringno = LPFC_ELS_RING;
10534         pring->txcmplq_cnt = 0;
10535         INIT_LIST_HEAD(&pring->txq);
10536         INIT_LIST_HEAD(&pring->txcmplq);
10537         INIT_LIST_HEAD(&pring->iocb_continueq);
10538         spin_lock_init(&pring->ring_lock);
10539
10540         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10541                 pring = phba->sli4_hba.nvmels_wq->pring;
10542                 pring->flag = 0;
10543                 pring->ringno = LPFC_ELS_RING;
10544                 pring->txcmplq_cnt = 0;
10545                 INIT_LIST_HEAD(&pring->txq);
10546                 INIT_LIST_HEAD(&pring->txcmplq);
10547                 INIT_LIST_HEAD(&pring->iocb_continueq);
10548                 spin_lock_init(&pring->ring_lock);
10549         }
10550
10551         spin_unlock_irq(&phba->hbalock);
10552 }
10553
10554 /**
10555  * lpfc_sli_queue_init - Queue initialization function
10556  * @phba: Pointer to HBA context object.
10557  *
10558  * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10559  * ring. This function also initializes ring indices of each ring.
10560  * This function is called during the initialization of the SLI
10561  * interface of an HBA.
10562  * This function is called with no lock held and returns no value.
10564  **/
10565 void
10566 lpfc_sli_queue_init(struct lpfc_hba *phba)
10567 {
10568         struct lpfc_sli *psli;
10569         struct lpfc_sli_ring *pring;
10570         int i;
10571
10572         psli = &phba->sli;
10573         spin_lock_irq(&phba->hbalock);
10574         INIT_LIST_HEAD(&psli->mboxq);
10575         INIT_LIST_HEAD(&psli->mboxq_cmpl);
10576         /* Initialize list headers for txq and txcmplq as doubly linked lists */
10577         for (i = 0; i < psli->num_rings; i++) {
10578                 pring = &psli->sli3_ring[i];
10579                 pring->ringno = i;
10580                 pring->sli.sli3.next_cmdidx  = 0;
10581                 pring->sli.sli3.local_getidx = 0;
10582                 pring->sli.sli3.cmdidx = 0;
10583                 INIT_LIST_HEAD(&pring->iocb_continueq);
10584                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10585                 INIT_LIST_HEAD(&pring->postbufq);
10586                 pring->flag = 0;
10587                 INIT_LIST_HEAD(&pring->txq);
10588                 INIT_LIST_HEAD(&pring->txcmplq);
10589                 spin_lock_init(&pring->ring_lock);
10590         }
10591         spin_unlock_irq(&phba->hbalock);
10592 }
10593
10594 /**
10595  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10596  * @phba: Pointer to HBA context object.
10597  *
10598  * This routine flushes the mailbox command subsystem. It will unconditionally
10599  * flush all the mailbox commands in the three possible stages in the mailbox
10600  * command sub-system: pending mailbox command queue; the outstanding mailbox
10601  * command; and completed mailbox command queue. It is the caller's
10602  * responsibility to make sure that the driver is in the proper state to
10603  * flush the mailbox command sub-system. Namely, the posting of mailbox
10604  * commands into the pending mailbox command queue from the various clients
10605  * must be stopped; either the HBA is in a state where it will never work on
10606  * the outstanding mailbox command (such as in EEH or ERATT conditions) or
10607  * the outstanding mailbox command has been completed.
10608  **/
10609 static void
10610 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10611 {
10612         LIST_HEAD(completions);
10613         struct lpfc_sli *psli = &phba->sli;
10614         LPFC_MBOXQ_t *pmb;
10615         unsigned long iflag;
10616
10617         /* Disable softirqs, including timers from obtaining phba->hbalock */
10618         local_bh_disable();
10619
10620         /* Flush all the mailbox commands in the mbox system */
10621         spin_lock_irqsave(&phba->hbalock, iflag);
10622
10623         /* The pending mailbox command queue */
10624         list_splice_init(&phba->sli.mboxq, &completions);
10625         /* The outstanding active mailbox command */
10626         if (psli->mbox_active) {
10627                 list_add_tail(&psli->mbox_active->list, &completions);
10628                 psli->mbox_active = NULL;
10629                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10630         }
10631         /* The completed mailbox command queue */
10632         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10633         spin_unlock_irqrestore(&phba->hbalock, iflag);
10634
10635         /* Enable softirqs again, done with phba->hbalock */
10636         local_bh_enable();
10637
10638         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10639         while (!list_empty(&completions)) {
10640                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10641                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10642                 if (pmb->mbox_cmpl)
10643                         pmb->mbox_cmpl(phba, pmb);
10644         }
10645 }
10646
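/*
 * Hedged example: a client's completion handler can detect a flush by
 * checking the status set above ("my_mbox_cmpl" is a hypothetical handler,
 * not a driver symbol):
 *
 *	static void my_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *			return;
 *		}
 *		...
 *	}
 */
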
10647 /**
10648  * lpfc_sli_host_down - Vport cleanup function
10649  * @vport: Pointer to virtual port object.
10650  *
10651  * lpfc_sli_host_down is called to clean up the resources
10652  * associated with a vport before destroying virtual
10653  * port data structures.
10654  * This function performs the following operations:
10655  * - Free discovery resources associated with this virtual
10656  *   port.
10657  * - Free iocbs associated with this virtual port in
10658  *   the txq.
10659  * - Send abort for all iocb commands associated with this
10660  *   vport in txcmplq.
10661  *
10662  * This function is called with no lock held and always returns 1.
10663  **/
10664 int
10665 lpfc_sli_host_down(struct lpfc_vport *vport)
10666 {
10667         LIST_HEAD(completions);
10668         struct lpfc_hba *phba = vport->phba;
10669         struct lpfc_sli *psli = &phba->sli;
10670         struct lpfc_queue *qp = NULL;
10671         struct lpfc_sli_ring *pring;
10672         struct lpfc_iocbq *iocb, *next_iocb;
10673         int i;
10674         unsigned long flags = 0;
10675         uint16_t prev_pring_flag;
10676
10677         lpfc_cleanup_discovery_resources(vport);
10678
10679         spin_lock_irqsave(&phba->hbalock, flags);
10680
10681         /*
10682          * Error everything on the txq since these iocbs
10683          * have not been given to the FW yet.
10684          * Also issue ABTS for everything on the txcmplq
10685          */
10686         if (phba->sli_rev != LPFC_SLI_REV4) {
10687                 for (i = 0; i < psli->num_rings; i++) {
10688                         pring = &psli->sli3_ring[i];
10689                         prev_pring_flag = pring->flag;
10690                         /* Only slow rings */
10691                         if (pring->ringno == LPFC_ELS_RING) {
10692                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10693                                 /* Set the lpfc data pending flag */
10694                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10695                         }
10696                         list_for_each_entry_safe(iocb, next_iocb,
10697                                                  &pring->txq, list) {
10698                                 if (iocb->vport != vport)
10699                                         continue;
10700                                 list_move_tail(&iocb->list, &completions);
10701                         }
10702                         list_for_each_entry_safe(iocb, next_iocb,
10703                                                  &pring->txcmplq, list) {
10704                                 if (iocb->vport != vport)
10705                                         continue;
10706                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10707                         }
10708                         pring->flag = prev_pring_flag;
10709                 }
10710         } else {
10711                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10712                         pring = qp->pring;
10713                         if (!pring)
10714                                 continue;
10715                         if (pring == phba->sli4_hba.els_wq->pring) {
10716                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10717                                 /* Set the lpfc data pending flag */
10718                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10719                         }
10720                         prev_pring_flag = pring->flag;
10721                         spin_lock(&pring->ring_lock);
10722                         list_for_each_entry_safe(iocb, next_iocb,
10723                                                  &pring->txq, list) {
10724                                 if (iocb->vport != vport)
10725                                         continue;
10726                                 list_move_tail(&iocb->list, &completions);
10727                         }
10728                         spin_unlock(&pring->ring_lock);
10729                         list_for_each_entry_safe(iocb, next_iocb,
10730                                                  &pring->txcmplq, list) {
10731                                 if (iocb->vport != vport)
10732                                         continue;
10733                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10734                         }
10735                         pring->flag = prev_pring_flag;
10736                 }
10737         }
10738         spin_unlock_irqrestore(&phba->hbalock, flags);
10739
10740         /* Cancel all the IOCBs from the completions list */
10741         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10742                               IOERR_SLI_DOWN);
10743         return 1;
10744 }
10745
10746 /**
10747  * lpfc_sli_hba_down - Resource cleanup function for the HBA
10748  * @phba: Pointer to HBA context object.
10749  *
10750  * This function cleans up all iocb, buffers, mailbox commands
10751  * while shutting down the HBA. This function is called with no
10752  * lock held and always returns 1.
10753  * This function does the following to cleanup driver resources:
10754  * - Free discovery resources for each virtual port
10755  * - Cleanup any pending fabric iocbs
10756  * - Iterate through the iocb txq and free each entry
10757  *   in the list.
10758  * - Free up any buffer posted to the HBA
10759  * - Free mailbox commands in the mailbox queue.
10760  **/
10761 int
10762 lpfc_sli_hba_down(struct lpfc_hba *phba)
10763 {
10764         LIST_HEAD(completions);
10765         struct lpfc_sli *psli = &phba->sli;
10766         struct lpfc_queue *qp = NULL;
10767         struct lpfc_sli_ring *pring;
10768         struct lpfc_dmabuf *buf_ptr;
10769         unsigned long flags = 0;
10770         int i;
10771
10772         /* Shutdown the mailbox command sub-system */
10773         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10774
10775         lpfc_hba_down_prep(phba);
10776
10777         /* Disable softirqs, including timers from obtaining phba->hbalock */
10778         local_bh_disable();
10779
10780         lpfc_fabric_abort_hba(phba);
10781
10782         spin_lock_irqsave(&phba->hbalock, flags);
10783
10784         /*
10785          * Error everything on the txq since these iocbs
10786          * have not been given to the FW yet.
10787          */
10788         if (phba->sli_rev != LPFC_SLI_REV4) {
10789                 for (i = 0; i < psli->num_rings; i++) {
10790                         pring = &psli->sli3_ring[i];
10791                         /* Only slow rings */
10792                         if (pring->ringno == LPFC_ELS_RING) {
10793                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10794                                 /* Set the lpfc data pending flag */
10795                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10796                         }
10797                         list_splice_init(&pring->txq, &completions);
10798                 }
10799         } else {
10800                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10801                         pring = qp->pring;
10802                         if (!pring)
10803                                 continue;
10804                         spin_lock(&pring->ring_lock);
10805                         list_splice_init(&pring->txq, &completions);
10806                         spin_unlock(&pring->ring_lock);
10807                         if (pring == phba->sli4_hba.els_wq->pring) {
10808                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10809                                 /* Set the lpfc data pending flag */
10810                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
10811                         }
10812                 }
10813         }
10814         spin_unlock_irqrestore(&phba->hbalock, flags);
10815
10816         /* Cancel all the IOCBs from the completions list */
10817         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10818                               IOERR_SLI_DOWN);
10819
10820         spin_lock_irqsave(&phba->hbalock, flags);
10821         list_splice_init(&phba->elsbuf, &completions);
10822         phba->elsbuf_cnt = 0;
10823         phba->elsbuf_prev_cnt = 0;
10824         spin_unlock_irqrestore(&phba->hbalock, flags);
10825
10826         while (!list_empty(&completions)) {
10827                 list_remove_head(&completions, buf_ptr,
10828                         struct lpfc_dmabuf, list);
10829                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10830                 kfree(buf_ptr);
10831         }
10832
10833         /* Enable softirqs again, done with phba->hbalock */
10834         local_bh_enable();
10835
10836         /* Return any active mbox cmds */
10837         del_timer_sync(&psli->mbox_tmo);
10838
10839         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10840         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10841         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10842
10843         return 1;
10844 }
10845
10846 /**
10847  * lpfc_sli_pcimem_bcopy - SLI memory copy function
10848  * @srcp: Source memory pointer.
10849  * @destp: Destination memory pointer.
10850  * @cnt: Number of bytes to copy (advanced one 32-bit word at a time).
10851  *
10852  * This function is used for copying data between driver memory
10853  * and the SLI memory. This function also changes the endianness
10854  * of each word if native endianness is different from SLI
10855  * endianness. This function can be called with or without
10856  * lock.
10857  **/
10858 void
10859 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10860 {
10861         uint32_t *src = srcp;
10862         uint32_t *dest = destp;
10863         uint32_t ldata;
10864         int i;
10865
10866         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10867                 ldata = *src;
10868                 ldata = le32_to_cpu(ldata);
10869                 *dest = ldata;
10870                 src++;
10871                 dest++;
10872         }
10873 }
10874
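/*
 * Hedged usage note: @cnt is interpreted as a byte count and the loop above
 * advances one 32-bit word per iteration, so a call such as
 *
 *	lpfc_sli_pcimem_bcopy(src_le_words, dst_cpu_words, 28);
 *
 * (with hypothetical word arrays) copies and byte-swaps 7 words.
 */
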
10875
10876 /**
10877  * lpfc_sli_bemem_bcopy - SLI memory copy function
10878  * @srcp: Source memory pointer.
10879  * @destp: Destination memory pointer.
10880  * @cnt: Number of bytes to copy (advanced one 32-bit word at a time).
10881  *
10882  * This function is used for copying data between a data structure
10883  * with big endian representation to local endianness.
10884  * This function can be called with or without lock.
10885  **/
10886 void
10887 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10888 {
10889         uint32_t *src = srcp;
10890         uint32_t *dest = destp;
10891         uint32_t ldata;
10892         int i;
10893
10894         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10895                 ldata = *src;
10896                 ldata = be32_to_cpu(ldata);
10897                 *dest = ldata;
10898                 src++;
10899                 dest++;
10900         }
10901 }
10902
10903 /**
10904  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10905  * @phba: Pointer to HBA context object.
10906  * @pring: Pointer to driver SLI ring object.
10907  * @mp: Pointer to driver buffer object.
10908  *
10909  * This function is called with no lock held.
10910  * It always returns zero after adding the buffer to the postbufq
10911  * buffer list.
10912  **/
10913 int
10914 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10915                          struct lpfc_dmabuf *mp)
10916 {
10917         /* Stick struct lpfc_dmabuf at the end of the postbufq so the
10918          * driver can look it up later. */
10919         spin_lock_irq(&phba->hbalock);
10920         list_add_tail(&mp->list, &pring->postbufq);
10921         pring->postbufq_cnt++;
10922         spin_unlock_irq(&phba->hbalock);
10923         return 0;
10924 }
10925
10926 /**
10927  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10928  * @phba: Pointer to HBA context object.
10929  *
10930  * When HBQ is enabled, buffers are searched based on tags. This function
10931  * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
10932  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10933  * does not conflict with tags of buffers posted for unsolicited events.
10934  * The function returns the allocated tag. The function is called with
10935  * no locks held.
10936  **/
10937 uint32_t
10938 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10939 {
10940         spin_lock_irq(&phba->hbalock);
10941         phba->buffer_tag_count++;
10942         /*
10943          * Always set the QUE_BUFTAG_BIT to distinguish this tag
10944          * from a tag assigned by HBQ.
10945          */
10946         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10947         spin_unlock_irq(&phba->hbalock);
10948         return phba->buffer_tag_count;
10949 }
10950
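/*
 * Illustrative lifecycle sketch ("mp" is a hypothetical, already-mapped
 * lpfc_dmabuf): tag the buffer, post it, and recover it later by the tag
 * carried in the CMD_IOCB_RET_XRI64_CX response (see
 * lpfc_sli_ring_taggedbuf_get() below):
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba); (QUE_BUFTAG_BIT set)
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);
 */
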
10951 /**
10952  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10953  * @phba: Pointer to HBA context object.
10954  * @pring: Pointer to driver SLI ring object.
10955  * @tag: Buffer tag.
10956  *
10957  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10958  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10959  * iocb is posted to the response ring with the tag of the buffer.
10960  * This function searches the pring->postbufq list using the tag
10961  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10962  * iocb. If the buffer is found, the lpfc_dmabuf object of the
10963  * buffer is returned to the caller; otherwise NULL is returned.
10964  * This function is called with no lock held.
10965  **/
10966 struct lpfc_dmabuf *
10967 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10968                         uint32_t tag)
10969 {
10970         struct lpfc_dmabuf *mp, *next_mp;
10971         struct list_head *slp = &pring->postbufq;
10972
10973         /* Search postbufq, from the beginning, looking for a match on tag */
10974         spin_lock_irq(&phba->hbalock);
10975         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10976                 if (mp->buffer_tag == tag) {
10977                         list_del_init(&mp->list);
10978                         pring->postbufq_cnt--;
10979                         spin_unlock_irq(&phba->hbalock);
10980                         return mp;
10981                 }
10982         }
10983
10984         spin_unlock_irq(&phba->hbalock);
10985         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10986                         "0402 Cannot find virtual addr for buffer tag on "
10987                         "ring %d Data x%lx x%px x%px x%x\n",
10988                         pring->ringno, (unsigned long) tag,
10989                         slp->next, slp->prev, pring->postbufq_cnt);
10990
10991         return NULL;
10992 }
10993
10994 /**
10995  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10996  * @phba: Pointer to HBA context object.
10997  * @pring: Pointer to driver SLI ring object.
10998  * @phys: DMA address of the buffer.
10999  *
11000  * This function searches the buffer list using the dma_address
11001  * of unsolicited event to find the driver's lpfc_dmabuf object
11002  * corresponding to the dma_address. The function returns the
11003  * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11004  * This function is called by the ct and els unsolicited event
11005  * handlers to get the buffer associated with the unsolicited
11006  * event.
11007  *
11008  * This function is called with no lock held.
11009  **/
11010 struct lpfc_dmabuf *
11011 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11012                          dma_addr_t phys)
11013 {
11014         struct lpfc_dmabuf *mp, *next_mp;
11015         struct list_head *slp = &pring->postbufq;
11016
11017         /* Search postbufq, from the beginning, looking for a match on phys */
11018         spin_lock_irq(&phba->hbalock);
11019         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11020                 if (mp->phys == phys) {
11021                         list_del_init(&mp->list);
11022                         pring->postbufq_cnt--;
11023                         spin_unlock_irq(&phba->hbalock);
11024                         return mp;
11025                 }
11026         }
11027
11028         spin_unlock_irq(&phba->hbalock);
11029         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11030                         "0410 Cannot find virtual addr for mapped buf on "
11031                         "ring %d Data x%llx x%px x%px x%x\n",
11032                         pring->ringno, (unsigned long long)phys,
11033                         slp->next, slp->prev, pring->postbufq_cnt);
11034         return NULL;
11035 }
11036
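/*
 * Hedged example: the CT/ELS unsolicited handlers typically reconstruct the
 * DMA address from the incoming iocb's buffer descriptor and look the
 * buffer up here (the cont64 access below is illustrative):
 *
 *	dma_addr_t paddr = getPaddr(icmd->un.cont64[0].addrHigh,
 *				    icmd->un.cont64[0].addrLow);
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, paddr);
 */
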
11037 /**
11038  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11039  * @phba: Pointer to HBA context object.
11040  * @cmdiocb: Pointer to driver command iocb object.
11041  * @rspiocb: Pointer to driver response iocb object.
11042  *
11043  * This function is the completion handler for the abort iocbs for
11044  * ELS commands. This function is called from the ELS ring event
11045  * handler with no lock held. This function frees memory resources
11046  * associated with the abort iocb.
11047  **/
11048 static void
11049 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11050                         struct lpfc_iocbq *rspiocb)
11051 {
11052         IOCB_t *irsp = &rspiocb->iocb;
11053         uint16_t abort_iotag, abort_context;
11054         struct lpfc_iocbq *abort_iocb = NULL;
11055
11056         if (irsp->ulpStatus) {
11057
11058                 /*
11059                  * Assume that the port already completed and returned, or
11060                  * will return the iocb. Just Log the message.
11061                  */
11062                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11063                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11064
11065                 spin_lock_irq(&phba->hbalock);
11066                 if (phba->sli_rev < LPFC_SLI_REV4) {
11067                         if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11068                             irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11069                             irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11070                                 spin_unlock_irq(&phba->hbalock);
11071                                 goto release_iocb;
11072                         }
11073                         if (abort_iotag != 0 &&
11074                                 abort_iotag <= phba->sli.last_iotag)
11075                                 abort_iocb =
11076                                         phba->sli.iocbq_lookup[abort_iotag];
11077                 } else
11078                         /* For sli4 the abort_tag is the XRI,
11079                          * so the abort routine puts the iotag of the iocb
11080                          * being aborted in the context field of the abort
11081                          * IOCB.
11082                          */
11083                         abort_iocb = phba->sli.iocbq_lookup[abort_context];
11084
11085                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11086                                 "0327 Cannot abort els iocb x%px "
11087                                 "with tag %x context %x, abort status %x, "
11088                                 "abort code %x\n",
11089                                 abort_iocb, abort_iotag, abort_context,
11090                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
11091
11092                 spin_unlock_irq(&phba->hbalock);
11093         }
11094 release_iocb:
11095         lpfc_sli_release_iocbq(phba, cmdiocb);
11096         return;
11097 }
11098
11099 /**
11100  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11101  * @phba: Pointer to HBA context object.
11102  * @cmdiocb: Pointer to driver command iocb object.
11103  * @rspiocb: Pointer to driver response iocb object.
11104  *
11105  * The function is called from SLI ring event handler with no
11106  * lock held. This function is the completion handler for ELS commands
11107  * which are aborted. The function frees memory resources used for
11108  * the aborted ELS commands.
11109  **/
11110 static void
11111 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11112                      struct lpfc_iocbq *rspiocb)
11113 {
11114         IOCB_t *irsp = &rspiocb->iocb;
11115
11116         /* ELS cmd tag <ulpIoTag> completes */
11117         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11118                         "0139 Ignoring ELS cmd tag x%x completion Data: "
11119                         "x%x x%x x%x\n",
11120                         irsp->ulpIoTag, irsp->ulpStatus,
11121                         irsp->un.ulpWord[4], irsp->ulpTimeout);
11122         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11123                 lpfc_ct_free_iocb(phba, cmdiocb);
11124         else
11125                 lpfc_els_free_iocb(phba, cmdiocb);
11126         return;
11127 }
11128
11129 /**
11130  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11131  * @phba: Pointer to HBA context object.
11132  * @pring: Pointer to driver SLI ring object.
11133  * @cmdiocb: Pointer to driver command iocb object.
11134  *
11135  * This function issues an abort iocb for the provided command iocb down to
11136  * the port. Unless the outstanding command iocb is itself an abort
11137  * request, this function issues the abort unconditionally. This function is
11138  * called with hbalock held. The function returns 0 when it fails due to
11139  * memory allocation failure or when the command iocb is an abort request.
11140  **/
11141 static int
11142 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11143                            struct lpfc_iocbq *cmdiocb)
11144 {
11145         struct lpfc_vport *vport = cmdiocb->vport;
11146         struct lpfc_iocbq *abtsiocbp;
11147         IOCB_t *icmd = NULL;
11148         IOCB_t *iabt = NULL;
11149         int retval;
11150         unsigned long iflags;
11151         struct lpfc_nodelist *ndlp;
11152
11153         lockdep_assert_held(&phba->hbalock);
11154
11155         /*
11156          * There are certain command types we don't want to abort.  And we
11157          * don't want to abort commands that are already in the process of
11158          * being aborted.
11159          */
11160         icmd = &cmdiocb->iocb;
11161         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11162             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11163             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11164                 return 0;
11165
11166         /* issue ABTS for this IOCB based on iotag */
11167         abtsiocbp = __lpfc_sli_get_iocbq(phba);
11168         if (abtsiocbp == NULL)
11169                 return 0;
11170
11171         /* This flag signals the response path to set the correct
11172          * status before calling the completion handler.
11173          */
11174         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11175
11176         iabt = &abtsiocbp->iocb;
11177         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11178         iabt->un.acxri.abortContextTag = icmd->ulpContext;
11179         if (phba->sli_rev == LPFC_SLI_REV4) {
11180                 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11181                 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11182         } else {
11183                 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11184                 if (pring->ringno == LPFC_ELS_RING) {
11185                         ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11186                         iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11187                 }
11188         }
11189         iabt->ulpLe = 1;
11190         iabt->ulpClass = icmd->ulpClass;
11191
11192         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11193         abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11194         if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11195                 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11196         if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11197                 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11198
11199         if (phba->link_state >= LPFC_LINK_UP)
11200                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11201         else
11202                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11203
11204         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11205         abtsiocbp->vport = vport;
11206
11207         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11208                          "0339 Abort xri x%x, original iotag x%x, "
11209                          "abort cmd iotag x%x\n",
11210                          iabt->un.acxri.abortIoTag,
11211                          iabt->un.acxri.abortContextTag,
11212                          abtsiocbp->iotag);
11213
11214         if (phba->sli_rev == LPFC_SLI_REV4) {
11215                 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11216                 if (unlikely(pring == NULL))
11217                         return 0;
11218                 /* Note: both hbalock and ring_lock need to be held here */
11219                 spin_lock_irqsave(&pring->ring_lock, iflags);
11220                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11221                         abtsiocbp, 0);
11222                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11223         } else {
11224                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11225                         abtsiocbp, 0);
11226         }
11227
11228         if (retval)
11229                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11230
11231         /*
11232          * Caller to this routine should check for IOCB_ERROR
11233          * and handle it properly.  This routine no longer removes
11234          * iocb off txcmplq and call compl in case of IOCB_ERROR.
11235          */
11236         return retval;
11237 }
11238
11239 /**
11240  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11241  * @phba: Pointer to HBA context object.
11242  * @pring: Pointer to driver SLI ring object.
11243  * @cmdiocb: Pointer to driver command iocb object.
11244  *
11245  * This function issues an abort iocb for the provided command iocb. In case
11246  * of unloading, the abort iocb will not be issued to commands on the ELS
11247  * ring. Instead, the completion callback of those commands shall be changed
11248  * so that nothing happens when they finish. This function is called with
11249  * hbalock held. The function returns 0 when the command iocb is an abort
11250  * request.
11251  **/
11252 int
11253 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11254                            struct lpfc_iocbq *cmdiocb)
11255 {
11256         struct lpfc_vport *vport = cmdiocb->vport;
11257         int retval = IOCB_ERROR;
11258         IOCB_t *icmd = NULL;
11259
11260         lockdep_assert_held(&phba->hbalock);
11261
11262         /*
11263          * There are certain command types we don't want to abort.  And we
11264          * don't want to abort commands that are already in the process of
11265          * being aborted.
11266          */
11267         icmd = &cmdiocb->iocb;
11268         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11269             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11270             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11271                 return 0;
11272
11273         if (!pring) {
11274                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11275                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11276                 else
11277                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11278                 goto abort_iotag_exit;
11279         }
11280
11281         /*
11282          * If we're unloading, don't abort iocb on the ELS ring, but change
11283          * the callback so that nothing happens when it finishes.
11284          */
11285         if ((vport->load_flag & FC_UNLOADING) &&
11286             (pring->ringno == LPFC_ELS_RING)) {
11287                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11288                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11289                 else
11290                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11291                 goto abort_iotag_exit;
11292         }
11293
11294         /* Now, we try to issue the abort for the cmdiocb */
11295         retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11296
11297 abort_iotag_exit:
11298         /*
11299          * Caller to this routine should check for IOCB_ERROR
11300          * and handle it properly.  This routine no longer removes
11301          * iocb off txcmplq and call compl in case of IOCB_ERROR.
11302          */
11303         return retval;
11304 }
11305
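/*
 * Hedged usage sketch (illustrative, not part of the original driver):
 * callers of lpfc_sli_issue_abort_iotag() are expected to hold
 * phba->hbalock across the call and to check for IOCB_ERROR themselves,
 * per the contract above. The helper name and the "9997" message number
 * are made up for this example.
 */
static void lpfc_example_abort_one(struct lpfc_hba *phba,
                                   struct lpfc_sli_ring *pring,
                                   struct lpfc_iocbq *cmdiocb)
{
        int ret;

        spin_lock_irq(&phba->hbalock);
        ret = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
        spin_unlock_irq(&phba->hbalock);

        if (ret == IOCB_ERROR)
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "9997 example: abort was not issued\n");
}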
11306 /**
11307  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11308  * @phba: pointer to lpfc HBA data structure.
11309  *
11310  * This routine will abort all pending and outstanding iocbs to an HBA.
11311  **/
11312 void
11313 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11314 {
11315         struct lpfc_sli *psli = &phba->sli;
11316         struct lpfc_sli_ring *pring;
11317         struct lpfc_queue *qp = NULL;
11318         int i;
11319
11320         if (phba->sli_rev != LPFC_SLI_REV4) {
11321                 for (i = 0; i < psli->num_rings; i++) {
11322                         pring = &psli->sli3_ring[i];
11323                         lpfc_sli_abort_iocb_ring(phba, pring);
11324                 }
11325                 return;
11326         }
11327         list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11328                 pring = qp->pring;
11329                 if (!pring)
11330                         continue;
11331                 lpfc_sli_abort_iocb_ring(phba, pring);
11332         }
11333 }
11334
11335 /**
11336  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11337  * @iocbq: Pointer to driver iocb object.
11338  * @vport: Pointer to driver virtual port object.
11339  * @tgt_id: SCSI ID of the target.
11340  * @lun_id: LUN ID of the scsi device.
11341  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11342  *
11343  * This function acts as an iocb filter for functions which abort or count
11344  * all FCP iocbs pending on a LUN/SCSI target/SCSI host. It will return
11345  * 0 if the filtering criteria are met for the given iocb and will return
11346  * 1 if they are not.
11347  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11348  * given iocb is for the SCSI device specified by vport, tgt_id and
11349  * lun_id parameter.
11350  * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11351  * given iocb is for the SCSI target specified by vport and tgt_id
11352  * parameters.
11353  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11354  * given iocb is for the SCSI host associated with the given vport.
11355  * This function is called with no locks held.
11356  **/
11357 static int
11358 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11359                            uint16_t tgt_id, uint64_t lun_id,
11360                            lpfc_ctx_cmd ctx_cmd)
11361 {
11362         struct lpfc_io_buf *lpfc_cmd;
11363         int rc = 1;
11364
11365         if (iocbq->vport != vport)
11366                 return rc;
11367
11368         if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
11369             !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11370                 return rc;
11371
11372         lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11373
11374         if (lpfc_cmd->pCmd == NULL)
11375                 return rc;
11376
11377         switch (ctx_cmd) {
11378         case LPFC_CTX_LUN:
11379                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11380                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11381                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11382                         rc = 0;
11383                 break;
11384         case LPFC_CTX_TGT:
11385                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11386                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11387                         rc = 0;
11388                 break;
11389         case LPFC_CTX_HOST:
11390                 rc = 0;
11391                 break;
11392         default:
11393                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11394                         __func__, ctx_cmd);
11395                 break;
11396         }
11397
11398         return rc;
11399 }
11400
11401 /**
11402  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11403  * @vport: Pointer to virtual port.
11404  * @tgt_id: SCSI ID of the target.
11405  * @lun_id: LUN ID of the scsi device.
11406  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11407  *
11408  * This function returns the number of FCP commands pending for the vport.
11409  * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11410  * commands pending on the vport for the SCSI device specified by the
11411  * tgt_id and lun_id parameters.
11412  * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11413  * commands pending on the vport for the SCSI target specified by the
11414  * tgt_id parameter.
11415  * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11416  * commands pending on the vport.
11417  * This function returns the number of iocbs which satisfy the filter.
11418  * This function is called without any lock held.
11419  **/
11420 int
11421 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11422                   lpfc_ctx_cmd ctx_cmd)
11423 {
11424         struct lpfc_hba *phba = vport->phba;
11425         struct lpfc_iocbq *iocbq;
11426         int sum, i;
11427
11428         spin_lock_irq(&phba->hbalock);
11429         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11430                 iocbq = phba->sli.iocbq_lookup[i];
11431
11432                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11433                                                ctx_cmd) == 0)
11434                         sum++;
11435         }
11436         spin_unlock_irq(&phba->hbalock);
11437
11438         return sum;
11439 }
11440
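/*
 * Hedged usage sketch (illustrative, not from this file): reset paths
 * typically poll lpfc_sli_sum_iocb() until the outstanding FCP I/O
 * count for a LUN/target/host context drains to zero. The helper name,
 * poll interval and 10s bound below are assumptions for the example.
 */
static int lpfc_example_wait_io_drain(struct lpfc_vport *vport,
                                      uint16_t tgt_id, uint64_t lun_id,
                                      lpfc_ctx_cmd ctx)
{
        int waited_ms = 0;

        while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, ctx)) {
                msleep(20);                     /* poll every 20ms */
                waited_ms += 20;
                if (waited_ms > 10000)          /* give up after ~10s */
                        return -ETIMEDOUT;
        }
        return 0;
}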
11441 /**
11442  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11443  * @phba: Pointer to HBA context object
11444  * @cmdiocb: Pointer to command iocb object.
11445  * @rspiocb: Pointer to response iocb object.
11446  *
11447  * This function is called when an aborted FCP iocb completes. This
11448  * function is called by the ring event handler with no lock held.
11449  * This function frees the iocb.
11450  **/
11451 void
11452 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11453                         struct lpfc_iocbq *rspiocb)
11454 {
11455         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11456                         "3096 ABORT_XRI_CN completing on rpi x%x "
11457                         "original iotag x%x, abort cmd iotag x%x "
11458                         "status 0x%x, reason 0x%x\n",
11459                         cmdiocb->iocb.un.acxri.abortContextTag,
11460                         cmdiocb->iocb.un.acxri.abortIoTag,
11461                         cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11462                         rspiocb->iocb.un.ulpWord[4]);
11463         lpfc_sli_release_iocbq(phba, cmdiocb);
11464         return;
11465 }
11466
11467 /**
11468  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11469  * @vport: Pointer to virtual port.
11470  * @pring: Pointer to driver SLI ring object.
11471  * @tgt_id: SCSI ID of the target.
11472  * @lun_id: LUN ID of the scsi device.
11473  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11474  *
11475  * This function sends an abort command for every SCSI command
11476  * associated with the given virtual port pending on the ring
11477  * filtered by lpfc_sli_validate_fcp_iocb function.
11478  * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to
11479  * the FCP iocbs associated with the LUN specified by the tgt_id and
11480  * lun_id parameters.
11481  * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to the
11482  * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11483  * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
11484  * FCP iocbs associated with the virtual port.
11485  * This function returns the number of iocbs it failed to abort.
11486  * This function is called with no locks held.
11487  **/
11488 int
11489 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11490                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11491 {
11492         struct lpfc_hba *phba = vport->phba;
11493         struct lpfc_iocbq *iocbq;
11494         struct lpfc_iocbq *abtsiocb;
11495         struct lpfc_sli_ring *pring_s4;
11496         IOCB_t *cmd = NULL;
11497         int errcnt = 0, ret_val = 0;
11498         int i;
11499
11500         /* all I/Os are in the process of being flushed */
11501         if (phba->hba_flag & HBA_IOQ_FLUSH)
11502                 return errcnt;
11503
11504         for (i = 1; i <= phba->sli.last_iotag; i++) {
11505                 iocbq = phba->sli.iocbq_lookup[i];
11506
11507                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11508                                                abort_cmd) != 0)
11509                         continue;
11510
11511                 /*
11512                  * If the iocbq is already being aborted, don't take a second
11513                  * action, but do count it.
11514                  */
11515                 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11516                         continue;
11517
11518                 /* issue ABTS for this IOCB based on iotag */
11519                 abtsiocb = lpfc_sli_get_iocbq(phba);
11520                 if (abtsiocb == NULL) {
11521                         errcnt++;
11522                         continue;
11523                 }
11524
11525                 /* indicate the IO is being aborted by the driver. */
11526                 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11527
11528                 cmd = &iocbq->iocb;
11529                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11530                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11531                 if (phba->sli_rev == LPFC_SLI_REV4)
11532                         abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11533                 else
11534                         abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11535                 abtsiocb->iocb.ulpLe = 1;
11536                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11537                 abtsiocb->vport = vport;
11538
11539                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11540                 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11541                 if (iocbq->iocb_flag & LPFC_IO_FCP)
11542                         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11543                 if (iocbq->iocb_flag & LPFC_IO_FOF)
11544                         abtsiocb->iocb_flag |= LPFC_IO_FOF;
11545
11546                 if (lpfc_is_link_up(phba))
11547                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11548                 else
11549                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11550
11551                 /* Setup callback routine and issue the command. */
11552                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11553                 if (phba->sli_rev == LPFC_SLI_REV4) {
11554                         pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11555                         if (!pring_s4)
11556                                 continue;
11557                         ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11558                                                       abtsiocb, 0);
11559                 } else
11560                         ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11561                                                       abtsiocb, 0);
11562                 if (ret_val == IOCB_ERROR) {
11563                         lpfc_sli_release_iocbq(phba, abtsiocb);
11564                         errcnt++;
11565                         continue;
11566                 }
11567         }
11568
11569         return errcnt;
11570 }
11571
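/*
 * Hedged usage sketch (illustrative): because lpfc_sli_abort_iocb()
 * returns the number of iocbs it failed to abort, a target-level
 * cleanup path can log the shortfall. The helper name and the "9996"
 * message number are assumptions for the example.
 */
static void lpfc_example_abort_tgt_ios(struct lpfc_vport *vport,
                                       struct lpfc_sli_ring *pring,
                                       uint16_t tgt_id)
{
        int errcnt = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0,
                                         LPFC_CTX_TGT);

        if (errcnt)
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "9996 example: %d iocbs not aborted\n",
                                 errcnt);
}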
11572 /**
11573  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11574  * @vport: Pointer to virtual port.
11575  * @pring: Pointer to driver SLI ring object.
11576  * @tgt_id: SCSI ID of the target.
11577  * @lun_id: LUN ID of the scsi device.
11578  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11579  *
11580  * This function sends an abort command for every SCSI command
11581  * associated with the given virtual port pending on the ring
11582  * filtered by lpfc_sli_validate_fcp_iocb function.
11583  * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
11584  * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
11585  * parameters.
11586  * When cmd == LPFC_CTX_TGT, the function sends an abort only to the
11587  * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11588  * When cmd == LPFC_CTX_HOST, the function sends an abort to all
11589  * FCP iocbs associated with the virtual port.
11590  * This function returns the number of iocbs it aborted.
11591  * This function is called with no locks held right after a taskmgmt
11592  * command is sent.
11593  **/
11594 int
11595 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11596                         uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11597 {
11598         struct lpfc_hba *phba = vport->phba;
11599         struct lpfc_io_buf *lpfc_cmd;
11600         struct lpfc_iocbq *abtsiocbq;
11601         struct lpfc_nodelist *ndlp;
11602         struct lpfc_iocbq *iocbq;
11603         IOCB_t *icmd;
11604         int sum, i, ret_val;
11605         unsigned long iflags;
11606         struct lpfc_sli_ring *pring_s4 = NULL;
11607
11608         spin_lock_irqsave(&phba->hbalock, iflags);
11609
11610         /* all I/Os are in the process of being flushed */
11611         if (phba->hba_flag & HBA_IOQ_FLUSH) {
11612                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11613                 return 0;
11614         }
11615         sum = 0;
11616
11617         for (i = 1; i <= phba->sli.last_iotag; i++) {
11618                 iocbq = phba->sli.iocbq_lookup[i];
11619
11620                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11621                                                cmd) != 0)
11622                         continue;
11623
11624                 /* Guard against IO completion being called at the same time */
11625                 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11626                 spin_lock(&lpfc_cmd->buf_lock);
11627
11628                 if (!lpfc_cmd->pCmd) {
11629                         spin_unlock(&lpfc_cmd->buf_lock);
11630                         continue;
11631                 }
11632
11633                 if (phba->sli_rev == LPFC_SLI_REV4) {
11634                         pring_s4 =
11635                             phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11636                         if (!pring_s4) {
11637                                 spin_unlock(&lpfc_cmd->buf_lock);
11638                                 continue;
11639                         }
11640                         /* Note: both hbalock and ring_lock must be held here */
11641                         spin_lock(&pring_s4->ring_lock);
11642                 }
11643
11644                 /*
11645                  * If the iocbq is already being aborted, don't take a second
11646                  * action, but do count it.
11647                  */
11648                 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11649                     !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11650                         if (phba->sli_rev == LPFC_SLI_REV4)
11651                                 spin_unlock(&pring_s4->ring_lock);
11652                         spin_unlock(&lpfc_cmd->buf_lock);
11653                         continue;
11654                 }
11655
11656                 /* issue ABTS for this IOCB based on iotag */
11657                 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11658                 if (!abtsiocbq) {
11659                         if (phba->sli_rev == LPFC_SLI_REV4)
11660                                 spin_unlock(&pring_s4->ring_lock);
11661                         spin_unlock(&lpfc_cmd->buf_lock);
11662                         continue;
11663                 }
11664
11665                 icmd = &iocbq->iocb;
11666                 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11667                 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11668                 if (phba->sli_rev == LPFC_SLI_REV4)
11669                         abtsiocbq->iocb.un.acxri.abortIoTag =
11670                                                          iocbq->sli4_xritag;
11671                 else
11672                         abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11673                 abtsiocbq->iocb.ulpLe = 1;
11674                 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11675                 abtsiocbq->vport = vport;
11676
11677                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11678                 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11679                 if (iocbq->iocb_flag & LPFC_IO_FCP)
11680                         abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11681                 if (iocbq->iocb_flag & LPFC_IO_FOF)
11682                         abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11683
11684                 ndlp = lpfc_cmd->rdata->pnode;
11685
11686                 if (lpfc_is_link_up(phba) &&
11687                     (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11688                         abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11689                 else
11690                         abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11691
11692                 /* Setup callback routine and issue the command. */
11693                 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11694
11695                 /*
11696                  * Indicate the IO is being aborted by the driver and mark
11697                  * it in the aborted IO's iocb_flag.
11698                  */
11699                 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11700
11701                 if (phba->sli_rev == LPFC_SLI_REV4) {
11702                         ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11703                                                         abtsiocbq, 0);
11704                         spin_unlock(&pring_s4->ring_lock);
11705                 } else {
11706                         ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11707                                                         abtsiocbq, 0);
11708                 }
11709
11710                 spin_unlock(&lpfc_cmd->buf_lock);
11711
11712                 if (ret_val == IOCB_ERROR)
11713                         __lpfc_sli_release_iocbq(phba, abtsiocbq);
11714                 else
11715                         sum++;
11716         }
11717         spin_unlock_irqrestore(&phba->hbalock, iflags);
11718         return sum;
11719 }
11720
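/*
 * Hedged usage sketch (illustrative): lpfc_sli_abort_taskmgmt() is
 * documented to run with no locks held, right after a task management
 * command (e.g. a LUN reset) has been sent, to ABTS/close whatever is
 * still outstanding in that context. The helper name and the "9995"
 * message number are assumptions; real reset handlers also wait for
 * the aborted I/O to drain afterwards.
 */
static void lpfc_example_post_tmf_cleanup(struct lpfc_vport *vport,
                                          struct lpfc_sli_ring *pring,
                                          uint16_t tgt_id, uint64_t lun_id)
{
        int cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
                                          LPFC_CTX_LUN);

        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "9995 example: issued %d post-TMF aborts\n", cnt);
}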
11721 /**
11722  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11723  * @phba: Pointer to HBA context object.
11724  * @cmdiocbq: Pointer to command iocb.
11725  * @rspiocbq: Pointer to response iocb.
11726  *
11727  * This function is the completion handler for iocbs issued using
11728  * lpfc_sli_issue_iocb_wait function. This function is called by the
11729  * ring event handler function without any lock held. This function
11730  * can be called from both worker thread context and interrupt
11731  * context. This function can also be called from other threads which
11732  * clean up the SLI layer objects.
11733  * This function copies the contents of the response iocb to the
11734  * response iocb memory object provided by the caller of
11735  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11736  * sleeps on the iocb completion.
11737  **/
11738 static void
11739 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11740                         struct lpfc_iocbq *cmdiocbq,
11741                         struct lpfc_iocbq *rspiocbq)
11742 {
11743         wait_queue_head_t *pdone_q;
11744         unsigned long iflags;
11745         struct lpfc_io_buf *lpfc_cmd;
11746
11747         spin_lock_irqsave(&phba->hbalock, iflags);
11748         if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11749
11750                 /*
11751                  * A time out has occurred for the iocb.  If a time out
11752                  * completion handler has been supplied, call it.  Otherwise,
11753                  * just free the iocbq.
11754                  */
11755
11756                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11757                 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11758                 cmdiocbq->wait_iocb_cmpl = NULL;
11759                 if (cmdiocbq->iocb_cmpl)
11760                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11761                 else
11762                         lpfc_sli_release_iocbq(phba, cmdiocbq);
11763                 return;
11764         }
11765
11766         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11767         if (cmdiocbq->context2 && rspiocbq)
11768                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11769                        &rspiocbq->iocb, sizeof(IOCB_t));
11770
11771         /* Set the exchange busy flag for task management commands */
11772         if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11773                 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11774                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11775                         cur_iocbq);
11776                 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11777         }
11778
11779         pdone_q = cmdiocbq->context_un.wait_queue;
11780         if (pdone_q)
11781                 wake_up(pdone_q);
11782         spin_unlock_irqrestore(&phba->hbalock, iflags);
11783         return;
11784 }
11785
11786 /**
11787  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11788  * @phba: Pointer to HBA context object.
11789  * @piocbq: Pointer to command iocb.
11790  * @flag: Flag to test.
11791  *
11792  * This routine grabs the hbalock and then tests the iocb_flag to
11793  * see if the passed-in flag is set.
11794  * Returns:
11795  * 1 if flag is set.
11796  * 0 if flag is not set.
11797  **/
11798 static int
11799 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11800                  struct lpfc_iocbq *piocbq, uint32_t flag)
11801 {
11802         unsigned long iflags;
11803         int ret;
11804
11805         spin_lock_irqsave(&phba->hbalock, iflags);
11806         ret = piocbq->iocb_flag & flag;
11807         spin_unlock_irqrestore(&phba->hbalock, iflags);
11808         return ret;
11809
11810 }
11811
11812 /**
11813  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11814  * @phba: Pointer to HBA context object.
11815  * @ring_number: SLI ring number on which to issue the iocb.
11816  * @piocb: Pointer to command iocb.
11817  * @prspiocbq: Pointer to response iocb.
11818  * @timeout: Timeout in number of seconds.
11819  *
11820  * This function issues the iocb to firmware and waits for the
11821  * iocb to complete. The iocb_cmpl field of the iocb shall be used
11822  * to handle iocbs which time out. If the field is NULL, the
11823  * function shall free the iocbq structure.  If more clean up is
11824  * needed, the caller is expected to provide a completion function
11825  * that will perform the needed clean up.  If the iocb command is
11826  * not completed within timeout seconds, the function will either
11827  * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11828  * completion function set in the iocb_cmpl field and then return
11829  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
11830  * resources if this function returns IOCB_TIMEDOUT.
11831  * The function waits for the iocb completion using a
11832  * non-interruptible wait.
11833  * This function will sleep while waiting for the iocb completion.
11834  * So, this function should not be called from any context which
11835  * does not allow sleeping. For the same reason, this function
11836  * cannot be called with interrupts disabled.
11837  * This function assumes that the iocb completions occur while
11838  * this function sleeps. So, this function cannot be called from
11839  * the thread which processes iocb completions for this ring.
11840  * This function clears the iocb_flag of the iocb object before
11841  * issuing the iocb and the iocb completion handler sets this
11842  * flag and wakes this thread when the iocb completes.
11843  * The contents of the response iocb will be copied to prspiocbq
11844  * by the completion handler when the command completes.
11845  * This function returns IOCB_SUCCESS on success.
11846  * This function is called with no lock held.
11847  **/
11848 int
11849 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11850                          uint32_t ring_number,
11851                          struct lpfc_iocbq *piocb,
11852                          struct lpfc_iocbq *prspiocbq,
11853                          uint32_t timeout)
11854 {
11855         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11856         long timeleft, timeout_req = 0;
11857         int retval = IOCB_SUCCESS;
11858         uint32_t creg_val;
11859         struct lpfc_iocbq *iocb;
11860         int txq_cnt = 0;
11861         int txcmplq_cnt = 0;
11862         struct lpfc_sli_ring *pring;
11863         unsigned long iflags;
11864         bool iocb_completed = true;
11865
11866         if (phba->sli_rev >= LPFC_SLI_REV4)
11867                 pring = lpfc_sli4_calc_ring(phba, piocb);
11868         else
11869                 pring = &phba->sli.sli3_ring[ring_number];
11870         /*
11871          * If the caller has provided a response iocbq buffer, then context2
11872          * must be NULL; otherwise it is an error.
11873          */
11874         if (prspiocbq) {
11875                 if (piocb->context2)
11876                         return IOCB_ERROR;
11877                 piocb->context2 = prspiocbq;
11878         }
11879
11880         piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11881         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11882         piocb->context_un.wait_queue = &done_q;
11883         piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11884
11885         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11886                 if (lpfc_readl(phba->HCregaddr, &creg_val))
11887                         return IOCB_ERROR;
11888                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11889                 writel(creg_val, phba->HCregaddr);
11890                 readl(phba->HCregaddr); /* flush */
11891         }
11892
11893         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11894                                      SLI_IOCB_RET_IOCB);
11895         if (retval == IOCB_SUCCESS) {
11896                 timeout_req = msecs_to_jiffies(timeout * 1000);
11897                 timeleft = wait_event_timeout(done_q,
11898                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11899                                 timeout_req);
11900                 spin_lock_irqsave(&phba->hbalock, iflags);
11901                 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11902
11903                         /*
11904                          * IOCB timed out.  Inform the wake iocb wait
11905                          * completion function and set local status
11906                          */
11907
11908                         iocb_completed = false;
11909                         piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11910                 }
11911                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11912                 if (iocb_completed) {
11913                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11914                                         "0331 IOCB wake signaled\n");
11915                         /* Note: we are not indicating if the IOCB has a success
11916                          * status or not - that's for the caller to check.
11917                          * IOCB_SUCCESS means only that the command was sent and
11918                          * completed, not that it completed successfully.
11919                          */
11920                 } else if (timeleft == 0) {
11921                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11922                                         "0338 IOCB wait timeout error - no "
11923                                         "wake response Data x%x\n", timeout);
11924                         retval = IOCB_TIMEDOUT;
11925                 } else {
11926                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11927                                         "0330 IOCB wake NOT set, "
11928                                         "Data x%x x%lx\n",
11929                                         timeout, (timeleft / jiffies));
11930                         retval = IOCB_TIMEDOUT;
11931                 }
11932         } else if (retval == IOCB_BUSY) {
11933                 if (phba->cfg_log_verbose & LOG_SLI) {
11934                         list_for_each_entry(iocb, &pring->txq, list) {
11935                                 txq_cnt++;
11936                         }
11937                         list_for_each_entry(iocb, &pring->txcmplq, list) {
11938                                 txcmplq_cnt++;
11939                         }
11940                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11941                                 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11942                                 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11943                 }
11944                 return retval;
11945         } else {
11946                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11947                                 "0332 IOCB wait issue failed, Data x%x\n",
11948                                 retval);
11949                 retval = IOCB_ERROR;
11950         }
11951
11952         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11953                 if (lpfc_readl(phba->HCregaddr, &creg_val))
11954                         return IOCB_ERROR;
11955                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11956                 writel(creg_val, phba->HCregaddr);
11957                 readl(phba->HCregaddr); /* flush */
11958         }
11959
11960         if (prspiocbq)
11961                 piocb->context2 = NULL;
11962
11963         piocb->context_un.wait_queue = NULL;
11964         piocb->iocb_cmpl = NULL;
11965         return retval;
11966 }
11967
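/*
 * Hedged usage sketch (illustrative): a synchronous caller pairs the
 * command iocb with a response iocbq, then honors the IOCB_TIMEDOUT
 * contract above by not freeing the command on timeout. Preparation of
 * cmdiocbq is elided and the helper name is an assumption.
 */
static int lpfc_example_issue_sync(struct lpfc_hba *phba,
                                   struct lpfc_iocbq *cmdiocbq)
{
        struct lpfc_iocbq *rspiocbq;
        int rc;

        rspiocbq = lpfc_sli_get_iocbq(phba);
        if (!rspiocbq)
                return IOCB_ERROR;

        /* wait up to 30 seconds for the completion handler to run */
        rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
                                      rspiocbq, 30);

        /* the wait routine detached rspiocbq (context2) before returning,
         * so it is safe to release it here; on IOCB_TIMEDOUT the timeout
         * completion path owns cmdiocbq and we must not free it
         */
        lpfc_sli_release_iocbq(phba, rspiocbq);
        return rc;
}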
11968 /**
11969  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
11970  * @phba: Pointer to HBA context object.
11971  * @pmboxq: Pointer to driver mailbox object.
11972  * @timeout: Timeout in number of seconds.
11973  *
11974  * This function issues the mailbox to firmware and waits for the
11975  * mailbox command to complete. If the mailbox command is not
11976  * completed within timeout seconds, it returns MBX_TIMEOUT.
11977  * The function waits for the mailbox completion using a
11978  * non-interruptible timed wait. If the mailbox does not complete
11979  * within the timeout, MBX_TIMEOUT is returned to the caller. The
11980  * caller should not free the mailbox resources if this function
11981  * returns MBX_TIMEOUT.
11982  * This function will sleep while waiting for the mailbox completion.
11983  * So, this function should not be called from any context which
11984  * does not allow sleeping. For the same reason, this function
11985  * cannot be called with interrupts disabled.
11986  * This function assumes that the mailbox completion occurs while
11987  * this function sleeps. So, this function cannot be called from
11988  * the worker thread which processes mailbox completions.
11989  * This function is called in the context of HBA management
11990  * applications.
11991  * This function returns MBX_SUCCESS when successful.
11992  * This function is called with no lock held.
11993  **/
11994 int
11995 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11996                          uint32_t timeout)
11997 {
11998         struct completion mbox_done;
11999         int retval;
12000         unsigned long flag;
12001
12002         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12003         /* setup wake call as mailbox callback */
12004         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12005
12006         /* setup context3 field to pass the completion to the wake function */
12007         init_completion(&mbox_done);
12008         pmboxq->context3 = &mbox_done;
12009         /* now issue the command */
12010         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12011         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12012                 wait_for_completion_timeout(&mbox_done,
12013                                             msecs_to_jiffies(timeout * 1000));
12014
12015                 spin_lock_irqsave(&phba->hbalock, flag);
12016                 pmboxq->context3 = NULL;
12017                 /*
12018                  * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
12019                  * otherwise do not free the resources.
12020                  */
12021                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12022                         retval = MBX_SUCCESS;
12023                 } else {
12024                         retval = MBX_TIMEOUT;
12025                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12026                 }
12027                 spin_unlock_irqrestore(&phba->hbalock, flag);
12028         }
12029         return retval;
12030 }
12031
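/*
 * Hedged usage sketch (illustrative): a typical caller allocates the
 * mailbox from the driver mempool, issues it synchronously, and, per
 * the contract above, must not free it on MBX_TIMEOUT because the wake
 * callback may still run. Filling in the MAILBOX_t command itself is
 * elided; the helper name is an assumption.
 */
static int lpfc_example_sync_mbox(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmboxq;
        int rc;

        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmboxq)
                return -ENOMEM;
        memset(pmboxq, 0, sizeof(*pmboxq));

        /* ... set up the mailbox command here (elided) ... */

        rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
        if (rc != MBX_TIMEOUT)
                mempool_free(pmboxq, phba->mbox_mem_pool);

        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}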
12032 /**
12033  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12034  * @phba: Pointer to HBA context.
12035  *
12036  * This function is called to shut down the driver's mailbox sub-system.
12037  * It first marks the mailbox sub-system as blocked to prevent further
12038  * asynchronous mailbox commands from being issued off the pending mailbox
12039  * command queue. If the mailbox command sub-system shutdown is due to
12040  * HBA error conditions such as EEH or ERATT, this routine shall invoke
12041  * the mailbox sub-system flush routine to forcefully bring down the
12042  * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12043  * as offline or HBA function reset), this routine will wait for the
12044  * outstanding mailbox command to complete before invoking the mailbox
12045  * sub-system flush routine to gracefully bring down the mailbox sub-system.
12046  **/
12047 void
12048 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12049 {
12050         struct lpfc_sli *psli = &phba->sli;
12051         unsigned long timeout;
12052
12053         if (mbx_action == LPFC_MBX_NO_WAIT) {
12054                 /* delay 100ms for port state */
12055                 msleep(100);
12056                 lpfc_sli_mbox_sys_flush(phba);
12057                 return;
12058         }
12059         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12060
12061         /* Disable softirqs, including timers from obtaining phba->hbalock */
12062         local_bh_disable();
12063
12064         spin_lock_irq(&phba->hbalock);
12065         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12066
12067         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12068                 /* Determine how long we might wait for the active mailbox
12069                  * command to be gracefully completed by firmware.
12070                  */
12071                 if (phba->sli.mbox_active)
12072                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12073                                                 phba->sli.mbox_active) *
12074                                                 1000) + jiffies;
12075                 spin_unlock_irq(&phba->hbalock);
12076
12077                 /* Enable softirqs again, done with phba->hbalock */
12078                 local_bh_enable();
12079
12080                 while (phba->sli.mbox_active) {
12081                         /* Check active mailbox complete status every 2ms */
12082                         msleep(2);
12083                         if (time_after(jiffies, timeout))
12084                                 /* Timeout, let the mailbox flush routine
12085                                  * forcefully release the active mailbox command
12086                                  */
12087                                 break;
12088                 }
12089         } else {
12090                 spin_unlock_irq(&phba->hbalock);
12091
12092                 /* Enable softirqs again, done with phba->hbalock */
12093                 local_bh_enable();
12094         }
12095
12096         lpfc_sli_mbox_sys_flush(phba);
12097 }
12098
12099 /**
12100  * lpfc_sli_eratt_read - read sli-3 error attention events
12101  * @phba: Pointer to HBA context.
12102  *
12103  * This function is called to read the SLI3 device error attention registers
12104  * for possible error attention events. The caller must hold the hostlock
12105  * for possible error attention events. The caller must hold the hbalock
12106  *
12107  * This function returns 1 when there is Error Attention in the Host Attention
12108  * Register and returns 0 otherwise.
12109  **/
12110 static int
12111 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12112 {
12113         uint32_t ha_copy;
12114
12115         /* Read chip Host Attention (HA) register */
12116         if (lpfc_readl(phba->HAregaddr, &ha_copy))
12117                 goto unplug_err;
12118
12119         if (ha_copy & HA_ERATT) {
12120                 /* Read host status register to retrieve error event */
12121                 if (lpfc_sli_read_hs(phba))
12122                         goto unplug_err;
12123
12124                 /* Check if a deferred error condition is active */
12125                 if ((HS_FFER1 & phba->work_hs) &&
12126                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12127                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12128                         phba->hba_flag |= DEFER_ERATT;
12129                         /* Clear all interrupt enable conditions */
12130                         writel(0, phba->HCregaddr);
12131                         readl(phba->HCregaddr);
12132                 }
12133
12134                 /* Set the driver HA work bitmap */
12135                 phba->work_ha |= HA_ERATT;
12136                 /* Indicate polling handles this ERATT */
12137                 phba->hba_flag |= HBA_ERATT_HANDLED;
12138                 return 1;
12139         }
12140         return 0;
12141
12142 unplug_err:
12143         /* Set the driver HS work bitmap */
12144         phba->work_hs |= UNPLUG_ERR;
12145         /* Set the driver HA work bitmap */
12146         phba->work_ha |= HA_ERATT;
12147         /* Indicate polling handles this ERATT */
12148         phba->hba_flag |= HBA_ERATT_HANDLED;
12149         return 1;
12150 }
12151
12152 /**
12153  * lpfc_sli4_eratt_read - read sli-4 error attention events
12154  * @phba: Pointer to HBA context.
12155  *
12156  * This function is called to read the SLI4 device error attention registers
12157  * for possible error attention events. The caller must hold the hbalock
12158  * with spin_lock_irq().
12159  *
12160  * This function returns 1 when there is Error Attention in the Host Attention
12161  * Register and returns 0 otherwise.
12162  **/
12163 static int
12164 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12165 {
12166         uint32_t uerr_sta_hi, uerr_sta_lo;
12167         uint32_t if_type, portsmphr;
12168         struct lpfc_register portstat_reg;
12169
12170         /*
12171          * For now, use the SLI4 device internal unrecoverable error
12172          * registers for error attention. This can be changed later.
12173          */
12174         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12175         switch (if_type) {
12176         case LPFC_SLI_INTF_IF_TYPE_0:
12177                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12178                         &uerr_sta_lo) ||
12179                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12180                         &uerr_sta_hi)) {
12181                         phba->work_hs |= UNPLUG_ERR;
12182                         phba->work_ha |= HA_ERATT;
12183                         phba->hba_flag |= HBA_ERATT_HANDLED;
12184                         return 1;
12185                 }
12186                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12187                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12188                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12189                                         "1423 HBA Unrecoverable error: "
12190                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12191                                         "ue_mask_lo_reg=0x%x, "
12192                                         "ue_mask_hi_reg=0x%x\n",
12193                                         uerr_sta_lo, uerr_sta_hi,
12194                                         phba->sli4_hba.ue_mask_lo,
12195                                         phba->sli4_hba.ue_mask_hi);
12196                         phba->work_status[0] = uerr_sta_lo;
12197                         phba->work_status[1] = uerr_sta_hi;
12198                         phba->work_ha |= HA_ERATT;
12199                         phba->hba_flag |= HBA_ERATT_HANDLED;
12200                         return 1;
12201                 }
12202                 break;
12203         case LPFC_SLI_INTF_IF_TYPE_2:
12204         case LPFC_SLI_INTF_IF_TYPE_6:
12205                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12206                         &portstat_reg.word0) ||
12207                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12208                         &portsmphr)){
12209                         phba->work_hs |= UNPLUG_ERR;
12210                         phba->work_ha |= HA_ERATT;
12211                         phba->hba_flag |= HBA_ERATT_HANDLED;
12212                         return 1;
12213                 }
12214                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12215                         phba->work_status[0] =
12216                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12217                         phba->work_status[1] =
12218                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12219                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12220                                         "2885 Port Status Event: "
12221                                         "port status reg 0x%x, "
12222                                         "port smphr reg 0x%x, "
12223                                         "error 1=0x%x, error 2=0x%x\n",
12224                                         portstat_reg.word0,
12225                                         portsmphr,
12226                                         phba->work_status[0],
12227                                         phba->work_status[1]);
12228                         phba->work_ha |= HA_ERATT;
12229                         phba->hba_flag |= HBA_ERATT_HANDLED;
12230                         return 1;
12231                 }
12232                 break;
12233         case LPFC_SLI_INTF_IF_TYPE_1:
12234         default:
12235                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12236                                 "2886 HBA Error Attention on unsupported "
12237                                 "if type %d.", if_type);
12238                 return 1;
12239         }
12240
12241         return 0;
12242 }
12243
12244 /**
12245  * lpfc_sli_check_eratt - check error attention events
12246  * @phba: Pointer to HBA context.
12247  *
12248  * This function is called from timer soft interrupt context to check HBA's
12249  * error attention register bit for error attention events.
12250  *
12251  * This function returns 1 when there is Error Attention in the Host Attention
12252  * Register and returns 0 otherwise.
12253  **/
12254 int
12255 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12256 {
12257         uint32_t ha_copy;
12258
12259         /* If somebody is waiting to handle an eratt, don't process it
12260          * here. The brdkill function will do this.
12261          */
12262         if (phba->link_flag & LS_IGNORE_ERATT)
12263                 return 0;
12264
12265         /* Check if interrupt handler handles this ERATT */
12266         spin_lock_irq(&phba->hbalock);
12267         if (phba->hba_flag & HBA_ERATT_HANDLED) {
12268                 /* Interrupt handler has handled ERATT */
12269                 spin_unlock_irq(&phba->hbalock);
12270                 return 0;
12271         }
12272
12273         /*
12274          * If there is deferred error attention, do not check for error
12275          * attention
12276          */
12277         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12278                 spin_unlock_irq(&phba->hbalock);
12279                 return 0;
12280         }
12281
12282         /* If PCI channel is offline, don't process it */
12283         if (unlikely(pci_channel_offline(phba->pcidev))) {
12284                 spin_unlock_irq(&phba->hbalock);
12285                 return 0;
12286         }
12287
12288         switch (phba->sli_rev) {
12289         case LPFC_SLI_REV2:
12290         case LPFC_SLI_REV3:
12291                 /* Read chip Host Attention (HA) register */
12292                 ha_copy = lpfc_sli_eratt_read(phba);
12293                 break;
12294         case LPFC_SLI_REV4:
12295                 /* Read device Unrecoverable Error (UERR) registers */
12296                 ha_copy = lpfc_sli4_eratt_read(phba);
12297                 break;
12298         default:
12299                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12300                                 "0299 Invalid SLI revision (%d)\n",
12301                                 phba->sli_rev);
12302                 ha_copy = 0;
12303                 break;
12304         }
12305         spin_unlock_irq(&phba->hbalock);
12306
12307         return ha_copy;
12308 }
12309
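/*
 * Hedged usage sketch (illustrative): a timer-context poller calls
 * lpfc_sli_check_eratt() and, on a hit, defers the real handling to
 * the worker thread; the event itself is already latched in
 * phba->work_ha by the read routines above. The helper name is an
 * assumption, and the wake primitive is assumed to be the driver's
 * lpfc_worker_wake_up().
 */
static void lpfc_example_poll_eratt(struct lpfc_hba *phba)
{
        if (lpfc_sli_check_eratt(phba))
                /* error attention latched; wake the worker thread */
                lpfc_worker_wake_up(phba);
}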
12310 /**
12311  * lpfc_intr_state_check - Check device state for interrupt handling
12312  * @phba: Pointer to HBA context.
12313  *
12314  * This inline routine checks whether a device or its PCI slot is in a state
12315  * in which the interrupt should be handled.
12316  *
12317  * This function returns 0 if the device or the PCI slot is in a state in
12318  * which the interrupt should be handled, otherwise -EIO.
12319  */
12320 static inline int
12321 lpfc_intr_state_check(struct lpfc_hba *phba)
12322 {
12323         /* If the pci channel is offline, ignore all the interrupts */
12324         if (unlikely(pci_channel_offline(phba->pcidev)))
12325                 return -EIO;
12326
12327         /* Update device level interrupt statistics */
12328         phba->sli.slistat.sli_intr++;
12329
12330         /* Ignore all interrupts during initialization. */
12331         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12332                 return -EIO;
12333
12334         return 0;
12335 }
12336
12337 /**
12338  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12339  * @irq: Interrupt number.
12340  * @dev_id: The device context pointer.
12341  *
12342  * This function is directly called from the PCI layer as an interrupt
12343  * service routine when device with SLI-3 interface spec is enabled with
12344  * MSI-X multi-message interrupt mode and there are slow-path events in
12345  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12346  * interrupt mode, this function is called as part of the device-level
12347  * interrupt handler. When the PCI slot is in error recovery or the HBA
12348  * is undergoing initialization, the interrupt handler will not process
12349  * the interrupt. The link attention and ELS ring attention events are
12350  * handled by the worker thread. The interrupt handler signals the worker
12351  * thread and returns for these events. This function is called without
12352  * any lock held. It gets the hbalock to access and update SLI data
12353  * structures.
12354  *
12355  * This function returns IRQ_HANDLED when interrupt is handled else it
12356  * returns IRQ_NONE.
12357  **/
12358 irqreturn_t
12359 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12360 {
12361         struct lpfc_hba  *phba;
12362         uint32_t ha_copy, hc_copy;
12363         uint32_t work_ha_copy;
12364         unsigned long status;
12365         unsigned long iflag;
12366         uint32_t control;
12367
12368         MAILBOX_t *mbox, *pmbox;
12369         struct lpfc_vport *vport;
12370         struct lpfc_nodelist *ndlp;
12371         struct lpfc_dmabuf *mp;
12372         LPFC_MBOXQ_t *pmb;
12373         int rc;
12374
12375         /*
12376          * Get the driver's phba structure from the dev_id and
12377          * assume the HBA is not interrupting.
12378          */
12379         phba = (struct lpfc_hba *)dev_id;
12380
12381         if (unlikely(!phba))
12382                 return IRQ_NONE;
12383
12384         /*
12385          * Additional handling is needed when this function is invoked as an
12386          * individual interrupt handler in MSI-X multi-message interrupt mode.
12387          */
12388         if (phba->intr_type == MSIX) {
12389                 /* Check device state for handling interrupt */
12390                 if (lpfc_intr_state_check(phba))
12391                         return IRQ_NONE;
12392                 /* Need to read HA REG for slow-path events */
12393                 spin_lock_irqsave(&phba->hbalock, iflag);
12394                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12395                         goto unplug_error;
12396                 /* If somebody is waiting to handle an eratt don't process it
12397                  * here. The brdkill function will do this.
12398                  */
12399                 if (phba->link_flag & LS_IGNORE_ERATT)
12400                         ha_copy &= ~HA_ERATT;
12401                 /* Check the need for handling ERATT in interrupt handler */
12402                 if (ha_copy & HA_ERATT) {
12403                         if (phba->hba_flag & HBA_ERATT_HANDLED)
12404                                 /* ERATT polling has handled ERATT */
12405                                 ha_copy &= ~HA_ERATT;
12406                         else
12407                                 /* Indicate interrupt handler handles ERATT */
12408                                 phba->hba_flag |= HBA_ERATT_HANDLED;
12409                 }
12410
12411                 /*
12412                  * If there is deferred error attention, do not check for any
12413                  * interrupt.
12414                  */
12415                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12416                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12417                         return IRQ_NONE;
12418                 }
12419
12420                 /* Clear up only attention source related to slow-path */
12421                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12422                         goto unplug_error;
12423
12424                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12425                         HC_LAINT_ENA | HC_ERINT_ENA),
12426                         phba->HCregaddr);
12427                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12428                         phba->HAregaddr);
12429                 writel(hc_copy, phba->HCregaddr);
12430                 readl(phba->HAregaddr); /* flush */
12431                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12432         } else
12433                 ha_copy = phba->ha_copy;
12434
12435         work_ha_copy = ha_copy & phba->work_ha_mask;
12436
12437         if (work_ha_copy) {
12438                 if (work_ha_copy & HA_LATT) {
12439                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12440                                 /*
12441                                  * Turn off Link Attention interrupts
12442                                  * until CLEAR_LA done
12443                                  */
12444                                 spin_lock_irqsave(&phba->hbalock, iflag);
12445                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12446                                 if (lpfc_readl(phba->HCregaddr, &control))
12447                                         goto unplug_error;
12448                                 control &= ~HC_LAINT_ENA;
12449                                 writel(control, phba->HCregaddr);
12450                                 readl(phba->HCregaddr); /* flush */
12451                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12452                         }
12453                         else
12454                                 work_ha_copy &= ~HA_LATT;
12455                 }
12456
12457                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12458                         /*
12459                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12460                          * the only slow ring.
12461                          */
12462                         status = (work_ha_copy &
12463                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
12464                         status >>= (4*LPFC_ELS_RING);
12465                         if (status & HA_RXMASK) {
12466                                 spin_lock_irqsave(&phba->hbalock, iflag);
12467                                 if (lpfc_readl(phba->HCregaddr, &control))
12468                                         goto unplug_error;
12469
12470                                 lpfc_debugfs_slow_ring_trc(phba,
12471                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
12472                                 control, status,
12473                                 (uint32_t)phba->sli.slistat.sli_intr);
12474
12475                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12476                                         lpfc_debugfs_slow_ring_trc(phba,
12477                                                 "ISR Disable ring: "
12478                                                 "pwork:x%x hawork:x%x wait:x%x",
12479                                                 phba->work_ha, work_ha_copy,
12480                                                 (uint32_t)((unsigned long)
12481                                                 &phba->work_waitq));
12482
12483                                         control &=
12484                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
12485                                         writel(control, phba->HCregaddr);
12486                                         readl(phba->HCregaddr); /* flush */
12487                                 }
12488                                 else {
12489                                         lpfc_debugfs_slow_ring_trc(phba,
12490                                                 "ISR slow ring:   pwork:"
12491                                                 "x%x hawork:x%x wait:x%x",
12492                                                 phba->work_ha, work_ha_copy,
12493                                                 (uint32_t)((unsigned long)
12494                                                 &phba->work_waitq));
12495                                 }
12496                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12497                         }
12498                 }
12499                 spin_lock_irqsave(&phba->hbalock, iflag);
12500                 if (work_ha_copy & HA_ERATT) {
12501                         if (lpfc_sli_read_hs(phba))
12502                                 goto unplug_error;
12503                         /*
12504                          * Check if a deferred error condition
12505                          * is active
12506                          */
12507                         if ((HS_FFER1 & phba->work_hs) &&
12508                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12509                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
12510                                   phba->work_hs)) {
12511                                 phba->hba_flag |= DEFER_ERATT;
12512                                 /* Clear all interrupt enable conditions */
12513                                 writel(0, phba->HCregaddr);
12514                                 readl(phba->HCregaddr);
12515                         }
12516                 }
12517
12518                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12519                         pmb = phba->sli.mbox_active;
12520                         pmbox = &pmb->u.mb;
12521                         mbox = phba->mbox;
12522                         vport = pmb->vport;
12523
12524                         /* First check out the status word */
12525                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12526                         if (pmbox->mbxOwner != OWN_HOST) {
12527                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12528                                 /*
12529                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
12530                                  * mbxStatus <status>
12531                                  */
12532                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12533                                                 LOG_SLI,
12534                                                 "(%d):0304 Stray Mailbox "
12535                                                 "Interrupt mbxCommand x%x "
12536                                                 "mbxStatus x%x\n",
12537                                                 (vport ? vport->vpi : 0),
12538                                                 pmbox->mbxCommand,
12539                                                 pmbox->mbxStatus);
12540                                 /* clear mailbox attention bit */
12541                                 work_ha_copy &= ~HA_MBATT;
12542                         } else {
12543                                 phba->sli.mbox_active = NULL;
12544                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12545                                 phba->last_completion_time = jiffies;
12546                                 del_timer(&phba->sli.mbox_tmo);
12547                                 if (pmb->mbox_cmpl) {
12548                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
12549                                                         MAILBOX_CMD_SIZE);
12550                                         if (pmb->out_ext_byte_len &&
12551                                                 pmb->ctx_buf)
12552                                                 lpfc_sli_pcimem_bcopy(
12553                                                 phba->mbox_ext,
12554                                                 pmb->ctx_buf,
12555                                                 pmb->out_ext_byte_len);
12556                                 }
12557                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12558                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12559
12560                                         lpfc_debugfs_disc_trc(vport,
12561                                                 LPFC_DISC_TRC_MBOX_VPORT,
12562                                                 "MBOX dflt rpi: "
12563                                                 "status:x%x rpi:x%x",
12564                                                 (uint32_t)pmbox->mbxStatus,
12565                                                 pmbox->un.varWords[0], 0);
12566
12567                                         if (!pmbox->mbxStatus) {
12568                                                 mp = (struct lpfc_dmabuf *)
12569                                                         (pmb->ctx_buf);
12570                                                 ndlp = (struct lpfc_nodelist *)
12571                                                         pmb->ctx_ndlp;
12572
12573                                                 /* Reg_LOGIN of dflt RPI was
12574                                                  * successful. Now let's get
12575                                                  * rid of the RPI using the
12576                                                  * same mbox buffer.
12577                                                  */
12578                                                 lpfc_unreg_login(phba,
12579                                                         vport->vpi,
12580                                                         pmbox->un.varWords[0],
12581                                                         pmb);
12582                                                 pmb->mbox_cmpl =
12583                                                         lpfc_mbx_cmpl_dflt_rpi;
12584                                                 pmb->ctx_buf = mp;
12585                                                 pmb->ctx_ndlp = ndlp;
12586                                                 pmb->vport = vport;
12587                                                 rc = lpfc_sli_issue_mbox(phba,
12588                                                                 pmb,
12589                                                                 MBX_NOWAIT);
12590                                                 if (rc != MBX_BUSY)
12591                                                         lpfc_printf_log(phba,
12592                                                         KERN_ERR,
12593                                                         LOG_MBOX | LOG_SLI,
12594                                                         "0350 rc should have "
12595                                                         "been MBX_BUSY\n");
12596                                                 if (rc != MBX_NOT_FINISHED)
12597                                                         goto send_current_mbox;
12598                                         }
12599                                 }
12600                                 spin_lock_irqsave(
12601                                                 &phba->pport->work_port_lock,
12602                                                 iflag);
12603                                 phba->pport->work_port_events &=
12604                                         ~WORKER_MBOX_TMO;
12605                                 spin_unlock_irqrestore(
12606                                                 &phba->pport->work_port_lock,
12607                                                 iflag);
12608                                 lpfc_mbox_cmpl_put(phba, pmb);
12609                         }
12610                 } else
12611                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12612
12613                 if ((work_ha_copy & HA_MBATT) &&
12614                     (phba->sli.mbox_active == NULL)) {
12615 send_current_mbox:
12616                         /* Process next mailbox command if there is one */
12617                         do {
12618                                 rc = lpfc_sli_issue_mbox(phba, NULL,
12619                                                          MBX_NOWAIT);
12620                         } while (rc == MBX_NOT_FINISHED);
12621                         if (rc != MBX_SUCCESS)
12622                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12623                                                 LOG_SLI, "0349 rc should be "
12624                                                 "MBX_SUCCESS\n");
12625                 }
12626
12627                 spin_lock_irqsave(&phba->hbalock, iflag);
12628                 phba->work_ha |= work_ha_copy;
12629                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12630                 lpfc_worker_wake_up(phba);
12631         }
12632         return IRQ_HANDLED;
12633 unplug_error:
12634         spin_unlock_irqrestore(&phba->hbalock, iflag);
12635         return IRQ_HANDLED;
12636
12637 } /* lpfc_sli_sp_intr_handler */
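
/*
 * Editor's note: the slow-path handler above follows a "mask, ack,
 * restore" sequence on the HC/HA registers before it handles the
 * events. Below is a minimal, hypothetical sketch of that sequence in
 * isolation; the example_* names are illustrative only and the block
 * is guarded out of the build.
 */
#ifdef LPFC_EDITOR_EXAMPLES
static void example_mask_ack_restore(void __iomem *hc_reg,
				     void __iomem *ha_reg,
				     u32 enable_bits, u32 ack_bits)
{
	u32 hc_copy = readl(hc_reg);

	/* 1. Mask: disable the interrupt sources about to be serviced */
	writel(hc_copy & ~enable_bits, hc_reg);
	/* 2. Ack: clear the serviced attention bits */
	writel(ack_bits, ha_reg);
	/* 3. Restore: re-enable the original interrupt sources */
	writel(hc_copy, hc_reg);
	readl(ha_reg);	/* flush posted PCI writes */
}
#endif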
12638
12639 /**
12640  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12641  * @irq: Interrupt number.
12642  * @dev_id: The device context pointer.
12643  *
12644  * This function is directly called from the PCI layer as an interrupt
12645  * service routine when device with SLI-3 interface spec is enabled with
12646  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12647  * ring event in the HBA. However, when the device is enabled with either
12648  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12649  * device-level interrupt handler. When the PCI slot is in error recovery
12650  * or the HBA is undergoing initialization, the interrupt handler will not
12651  * process the interrupt. The SCSI FCP fast-path ring events are handled in
12652  * the interrupt context. This function is called without any lock held.
12653  * It gets the hbalock to access and update SLI data structures.
12654  *
12655  * This function returns IRQ_HANDLED when the interrupt is handled, else
12656  * it returns IRQ_NONE.
12657  **/
12658 irqreturn_t
12659 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12660 {
12661         struct lpfc_hba  *phba;
12662         uint32_t ha_copy;
12663         unsigned long status;
12664         unsigned long iflag;
12665         struct lpfc_sli_ring *pring;
12666
12667         /* Get the driver's phba structure from the dev_id and
12668          * assume the HBA is not interrupting.
12669          */
12670         phba = (struct lpfc_hba *) dev_id;
12671
12672         if (unlikely(!phba))
12673                 return IRQ_NONE;
12674
12675         /*
12676          * Several things need attention when this function is invoked as
12677          * an individual interrupt handler in MSI-X multi-message interrupt mode.
12678          */
12679         if (phba->intr_type == MSIX) {
12680                 /* Check device state for handling interrupt */
12681                 if (lpfc_intr_state_check(phba))
12682                         return IRQ_NONE;
12683                 /* Need to read HA REG for FCP ring and other ring events */
12684                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12685                         return IRQ_HANDLED;
12686                 /* Clear only attention sources related to fast-path */
12687                 spin_lock_irqsave(&phba->hbalock, iflag);
12688                 /*
12689                  * If there is deferred error attention, do not check for
12690                  * any interrupt.
12691                  */
12692                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12693                         spin_unlock_irqrestore(&phba->hbalock, iflag);
12694                         return IRQ_NONE;
12695                 }
12696                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12697                         phba->HAregaddr);
12698                 readl(phba->HAregaddr); /* flush */
12699                 spin_unlock_irqrestore(&phba->hbalock, iflag);
12700         } else
12701                 ha_copy = phba->ha_copy;
12702
12703         /*
12704          * Process all events on FCP ring. Take the optimized path for FCP IO.
12705          */
12706         ha_copy &= ~(phba->work_ha_mask);
12707
12708         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12709         status >>= (4*LPFC_FCP_RING);
12710         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12711         if (status & HA_RXMASK)
12712                 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12713
12714         if (phba->cfg_multi_ring_support == 2) {
12715                 /*
12716                  * Process all events on extra ring. Take the optimized path
12717                  * for extra ring IO.
12718                  */
12719                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12720                 status >>= (4*LPFC_EXTRA_RING);
12721                 if (status & HA_RXMASK) {
12722                         lpfc_sli_handle_fast_ring_event(phba,
12723                                         &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12724                                         status);
12725                 }
12726         }
12727         return IRQ_HANDLED;
12728 }  /* lpfc_sli_fp_intr_handler */
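
/*
 * Editor's note: the fast-path handler isolates a per-ring, 4-bit
 * attention group in the HA register with shift/mask arithmetic
 * (HA_RXMASK << (4 * ring), then a shift back down). A hypothetical
 * stand-alone helper making that arithmetic explicit; the example_*
 * name is illustrative and the block is compiled out.
 */
#ifdef LPFC_EDITOR_EXAMPLES
static u32 example_ring_attention(u32 ha_copy, int ring)
{
	/* Each ring owns four consecutive bits of the HA register,
	 * so ring N's attention group starts at bit 4 * N.
	 */
	return (ha_copy >> (4 * ring)) & 0xf;
}
#endif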
12729
12730 /**
12731  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12732  * @irq: Interrupt number.
12733  * @dev_id: The device context pointer.
12734  *
12735  * This function is the HBA device-level interrupt handler to device with
12736  * SLI-3 interface spec, called from the PCI layer when either MSI or
12737  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12738  * requires driver attention. This function invokes the slow-path interrupt
12739  * attention handling function and fast-path interrupt attention handling
12740  * function in turn to process the relevant HBA attention events. This
12741  * function is called without any lock held. It gets the hbalock to access
12742  * and update SLI data structures.
12743  *
12744  * This function returns IRQ_HANDLED when the interrupt is handled, else it
12745  * returns IRQ_NONE.
12746  **/
12747 irqreturn_t
12748 lpfc_sli_intr_handler(int irq, void *dev_id)
12749 {
12750         struct lpfc_hba  *phba;
12751         irqreturn_t sp_irq_rc, fp_irq_rc;
12752         unsigned long status1, status2;
12753         uint32_t hc_copy;
12754
12755         /*
12756          * Get the driver's phba structure from the dev_id and
12757          * assume the HBA is not interrupting.
12758          */
12759         phba = (struct lpfc_hba *) dev_id;
12760
12761         if (unlikely(!phba))
12762                 return IRQ_NONE;
12763
12764         /* Check device state for handling interrupt */
12765         if (lpfc_intr_state_check(phba))
12766                 return IRQ_NONE;
12767
12768         spin_lock(&phba->hbalock);
12769         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12770                 spin_unlock(&phba->hbalock);
12771                 return IRQ_HANDLED;
12772         }
12773
12774         if (unlikely(!phba->ha_copy)) {
12775                 spin_unlock(&phba->hbalock);
12776                 return IRQ_NONE;
12777         } else if (phba->ha_copy & HA_ERATT) {
12778                 if (phba->hba_flag & HBA_ERATT_HANDLED)
12779                         /* ERATT polling has handled ERATT */
12780                         phba->ha_copy &= ~HA_ERATT;
12781                 else
12782                         /* Indicate interrupt handler handles ERATT */
12783                         phba->hba_flag |= HBA_ERATT_HANDLED;
12784         }
12785
12786         /*
12787          * If there is deferred error attention, do not check for any interrupt.
12788          */
12789         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12790                 spin_unlock(&phba->hbalock);
12791                 return IRQ_NONE;
12792         }
12793
12794         /* Clear attention sources except link and error attentions */
12795         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12796                 spin_unlock(&phba->hbalock);
12797                 return IRQ_HANDLED;
12798         }
12799         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12800                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12801                 phba->HCregaddr);
12802         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12803         writel(hc_copy, phba->HCregaddr);
12804         readl(phba->HAregaddr); /* flush */
12805         spin_unlock(&phba->hbalock);
12806
12807         /*
12808          * Invokes slow-path host attention interrupt handling as appropriate.
12809          */
12810
12811         /* status of events with mailbox and link attention */
12812         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12813
12814         /* status of events with ELS ring */
12815         status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
12816         status2 >>= (4*LPFC_ELS_RING);
12817
12818         if (status1 || (status2 & HA_RXMASK))
12819                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12820         else
12821                 sp_irq_rc = IRQ_NONE;
12822
12823         /*
12824          * Invoke fast-path host attention interrupt handling as appropriate.
12825          */
12826
12827         /* status of events with FCP ring */
12828         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12829         status1 >>= (4*LPFC_FCP_RING);
12830
12831         /* status of events with extra ring */
12832         if (phba->cfg_multi_ring_support == 2) {
12833                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12834                 status2 >>= (4*LPFC_EXTRA_RING);
12835         } else
12836                 status2 = 0;
12837
12838         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12839                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12840         else
12841                 fp_irq_rc = IRQ_NONE;
12842
12843         /* Return device-level interrupt handling status */
12844         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12845 }  /* lpfc_sli_intr_handler */
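
/*
 * Editor's note: a device-level handler such as the one above is the
 * kind of routine registered for INTx or single-message MSI mode,
 * while the sp/fp handlers are registered per MSI-X vector. A
 * minimal, hypothetical registration sketch (not the driver's actual
 * setup code, which lives elsewhere); compiled out of the build.
 */
#ifdef LPFC_EDITOR_EXAMPLES
static int example_register_intx(struct pci_dev *pdev,
				 struct lpfc_hba *phba)
{
	/* On a shared line the handler must tolerate interrupts that
	 * are not ours and return IRQ_NONE for them.
	 */
	return request_irq(pdev->irq, lpfc_sli_intr_handler, IRQF_SHARED,
			   "lpfc-example", phba);
}
#endif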
12846
12847 /**
12848  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12849  * @phba: pointer to lpfc hba data structure.
12850  *
12851  * This routine is invoked by the worker thread to process all the pending
12852  * SLI4 els abort xri events.
12853  **/
12854 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12855 {
12856         struct lpfc_cq_event *cq_event;
12857
12858         /* First, declare the els xri abort event has been handled */
12859         spin_lock_irq(&phba->hbalock);
12860         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12861         spin_unlock_irq(&phba->hbalock);
12862         /* Now, handle all the els xri abort events */
12863         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12864                 /* Get the first event from the head of the event queue */
12865                 spin_lock_irq(&phba->hbalock);
12866                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12867                                  cq_event, struct lpfc_cq_event, list);
12868                 spin_unlock_irq(&phba->hbalock);
12869                 /* Notify aborted XRI for ELS work queue */
12870                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12871                 /* Free the event processed back to the free pool */
12872                 lpfc_sli4_cq_event_release(phba, cq_event);
12873         }
12874 }
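
/*
 * Editor's note: the routine above follows a common worker-thread
 * pattern -- clear the "work pending" flag first, then drain the list
 * one element at a time under the lock, calling the handler with the
 * lock dropped. Clearing the flag before draining ensures an event
 * queued concurrently re-raises the flag rather than being lost. A
 * generic, hypothetical sketch of the consumer side (compiled out;
 * example_* names are illustrative):
 */
#ifdef LPFC_EDITOR_EXAMPLES
struct example_event {
	struct list_head list;
};

static void example_drain(spinlock_t *lock, struct list_head *queue,
			  unsigned long *flag_word, unsigned long pending_bit,
			  void (*process)(struct example_event *))
{
	struct example_event *ev;

	spin_lock_irq(lock);
	*flag_word &= ~pending_bit;	/* declare the work claimed */
	spin_unlock_irq(lock);

	for (;;) {
		spin_lock_irq(lock);
		ev = list_first_entry_or_null(queue, struct example_event,
					      list);
		if (ev)
			list_del(&ev->list);
		spin_unlock_irq(lock);
		if (!ev)
			break;
		process(ev);	/* handler runs without the lock held */
	}
}
#endif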
12875
12876 /**
12877  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12878  * @phba: pointer to lpfc hba data structure
12879  * @pIocbIn: pointer to the rspiocbq
12880  * @pIocbOut: pointer to the cmdiocbq
12881  * @wcqe: pointer to the complete wcqe
12882  *
12883  * This routine transfers the fields of a command iocbq to a response iocbq
12884  * by copying all the IOCB fields from command iocbq and transferring the
12885  * completion status information from the complete wcqe.
12886  **/
12887 static void
12888 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12889                               struct lpfc_iocbq *pIocbIn,
12890                               struct lpfc_iocbq *pIocbOut,
12891                               struct lpfc_wcqe_complete *wcqe)
12892 {
12893         int numBdes, i;
12894         unsigned long iflags;
12895         uint32_t status, max_response;
12896         struct lpfc_dmabuf *dmabuf;
12897         struct ulp_bde64 *bpl, bde;
12898         size_t offset = offsetof(struct lpfc_iocbq, iocb);
12899
12900         memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12901                sizeof(struct lpfc_iocbq) - offset);
12902         /* Map WCQE parameters into irspiocb parameters */
12903         status = bf_get(lpfc_wcqe_c_status, wcqe);
12904         pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12905         if (pIocbOut->iocb_flag & LPFC_IO_FCP) {
12906                 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12907                         pIocbIn->iocb.un.fcpi.fcpi_parm =
12908                                         pIocbOut->iocb.un.fcpi.fcpi_parm -
12909                                         wcqe->total_data_placed;
12910                 else
12911                         pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12912         } else {
12913                 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12914                 switch (pIocbOut->iocb.ulpCommand) {
12915                 case CMD_ELS_REQUEST64_CR:
12916                         dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12917                         bpl  = (struct ulp_bde64 *)dmabuf->virt;
12918                         bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12919                         max_response = bde.tus.f.bdeSize;
12920                         break;
12921                 case CMD_GEN_REQUEST64_CR:
12922                         max_response = 0;
12923                         if (!pIocbOut->context3)
12924                                 break;
12925                         numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12926                                         sizeof(struct ulp_bde64);
12927                         dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12928                         bpl = (struct ulp_bde64 *)dmabuf->virt;
12929                         for (i = 0; i < numBdes; i++) {
12930                                 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12931                                 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12932                                         max_response += bde.tus.f.bdeSize;
12933                         }
12934                         break;
12935                 default:
12936                         max_response = wcqe->total_data_placed;
12937                         break;
12938                 }
12939                 if (max_response < wcqe->total_data_placed)
12940                         pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12941                 else
12942                         pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12943                                 wcqe->total_data_placed;
12944         }
12945
12946         /* Convert BG errors for completion status */
12947         if (status == CQE_STATUS_DI_ERROR) {
12948                 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12949
12950                 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12951                         pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12952                 else
12953                         pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12954
12955                 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12956                 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12957                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12958                                 BGS_GUARD_ERR_MASK;
12959                 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12960                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12961                                 BGS_APPTAG_ERR_MASK;
12962                 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12963                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12964                                 BGS_REFTAG_ERR_MASK;
12965
12966                 /* Check to see if there was any good data before the error */
12967                 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12968                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12969                                 BGS_HI_WATER_MARK_PRESENT_MASK;
12970                         pIocbIn->iocb.unsli3.sli3_bg.bghm =
12971                                 wcqe->total_data_placed;
12972                 }
12973
12974                 /*
12975                  * Set ALL the error bits to indicate we don't know what
12976                  * type of error it is.
12977                  */
12978                 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12979                         pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12980                                 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12981                                 BGS_GUARD_ERR_MASK);
12982         }
12983
12984         /* Pick up HBA exchange busy condition */
12985         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12986                 spin_lock_irqsave(&phba->hbalock, iflags);
12987                 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12988                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12989         }
12990 }
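
/*
 * Editor's note: the memcpy at the top of the routine above copies
 * everything from the embedded IOCB onward, leaving the iocbq's
 * list/bookkeeping prefix untouched, by using offsetof() to locate
 * the start of the tail. A self-contained, hypothetical illustration
 * of the idiom (compiled out; example_* names are illustrative):
 */
#ifdef LPFC_EDITOR_EXAMPLES
struct example_obj {
	struct list_head list;	/* prefix: NOT copied */
	u32 payload[8];		/* tail: copied wholesale */
};

static void example_copy_tail(struct example_obj *dst,
			      const struct example_obj *src)
{
	size_t off = offsetof(struct example_obj, payload);

	memcpy((char *)dst + off, (const char *)src + off,
	       sizeof(*dst) - off);
}
#endif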
12991
12992 /**
12993  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12994  * @phba: Pointer to HBA context object.
12995  * @wcqe: Pointer to work-queue completion queue entry.
12996  *
12997  * This routine handles an ELS work-queue completion event and constructs
12998  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12999  * discovery engine to handle.
13000  *
13001  * Return: Pointer to the receive IOCBQ, NULL otherwise.
13002  **/
13003 static struct lpfc_iocbq *
13004 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13005                                struct lpfc_iocbq *irspiocbq)
13006 {
13007         struct lpfc_sli_ring *pring;
13008         struct lpfc_iocbq *cmdiocbq;
13009         struct lpfc_wcqe_complete *wcqe;
13010         unsigned long iflags;
13011
13012         pring = lpfc_phba_elsring(phba);
13013         if (unlikely(!pring))
13014                 return NULL;
13015
13016         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13017         pring->stats.iocb_event++;
13018         /* Look up the ELS command IOCB and create pseudo response IOCB */
13019         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13020                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13021         if (unlikely(!cmdiocbq)) {
13022                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13023                                 "0386 ELS complete with no corresponding "
13024                                 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13025                                 wcqe->word0, wcqe->total_data_placed,
13026                                 wcqe->parameter, wcqe->word3);
13027                 lpfc_sli_release_iocbq(phba, irspiocbq);
13028                 return NULL;
13029         }
13030
13031         spin_lock_irqsave(&pring->ring_lock, iflags);
13032         /* Put the iocb back on the txcmplq */
13033         lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13034         spin_unlock_irqrestore(&pring->ring_lock, iflags);
13035
13036         /* Fake the irspiocbq and copy necessary response information */
13037         lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13038
13039         return irspiocbq;
13040 }
13041
13042 inline struct lpfc_cq_event *
13043 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13044 {
13045         struct lpfc_cq_event *cq_event;
13046
13047         /* Allocate a new internal CQ_EVENT entry */
13048         cq_event = lpfc_sli4_cq_event_alloc(phba);
13049         if (!cq_event) {
13050                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13051                                 "0602 Failed to alloc CQ_EVENT entry\n");
13052                 return NULL;
13053         }
13054
13055         /* Move the CQE into the event */
13056         memcpy(&cq_event->cqe, entry, size);
13057         return cq_event;
13058 }
13059
13060 /**
13061  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13062  * @phba: Pointer to HBA context object.
13063  * @cqe: Pointer to mailbox completion queue entry.
13064  *
13065  * This routine processes a mailbox completion queue entry with an
13066  * asynchronous event.
13067  *
13068  * Return: true if work posted to worker thread, otherwise false.
13069  **/
13070 static bool
13071 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13072 {
13073         struct lpfc_cq_event *cq_event;
13074         unsigned long iflags;
13075
13076         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13077                         "0392 Async Event: word0:x%x, word1:x%x, "
13078                         "word2:x%x, word3:x%x\n", mcqe->word0,
13079                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13080
13081         cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13082         if (!cq_event)
13083                 return false;
13084         spin_lock_irqsave(&phba->hbalock, iflags);
13085         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13086         /* Set the async event flag */
13087         phba->hba_flag |= ASYNC_EVENT;
13088         spin_unlock_irqrestore(&phba->hbalock, iflags);
13089
13090         return true;
13091 }
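
/*
 * Editor's note: this is the producer half of the flag-plus-list
 * handoff whose consumer half is sketched after
 * lpfc_sli4_els_xri_abort_event_proc above: publish the event on the
 * queue and raise the pending flag under one lock hold, then wake the
 * worker after dropping the lock. Generic, hypothetical form
 * (compiled out; example_* names are illustrative):
 */
#ifdef LPFC_EDITOR_EXAMPLES
static void example_post(spinlock_t *lock, struct list_head *queue,
			 struct list_head *item, unsigned long *flag_word,
			 unsigned long pending_bit)
{
	unsigned long iflags;

	spin_lock_irqsave(lock, iflags);
	list_add_tail(item, queue);	/* publish the event... */
	*flag_word |= pending_bit;	/* ...then raise the pending flag */
	spin_unlock_irqrestore(lock, iflags);
	/* caller wakes the worker thread after dropping the lock */
}
#endif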
13092
13093 /**
13094  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13095  * @phba: Pointer to HBA context object.
13096  * @cqe: Pointer to mailbox completion queue entry.
13097  *
13098  * This routine processes a mailbox completion queue entry with a mailbox
13099  * completion event.
13100  *
13101  * Return: true if work posted to worker thread, otherwise false.
13102  **/
13103 static bool
13104 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13105 {
13106         uint32_t mcqe_status;
13107         MAILBOX_t *mbox, *pmbox;
13108         struct lpfc_mqe *mqe;
13109         struct lpfc_vport *vport;
13110         struct lpfc_nodelist *ndlp;
13111         struct lpfc_dmabuf *mp;
13112         unsigned long iflags;
13113         LPFC_MBOXQ_t *pmb;
13114         bool workposted = false;
13115         int rc;
13116
13117         /* If not a mailbox-complete MCQE, just handle mailbox consume and bail */
13118         if (!bf_get(lpfc_trailer_completed, mcqe))
13119                 goto out_no_mqe_complete;
13120
13121         /* Get the reference to the active mbox command */
13122         spin_lock_irqsave(&phba->hbalock, iflags);
13123         pmb = phba->sli.mbox_active;
13124         if (unlikely(!pmb)) {
13125                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13126                                 "1832 No pending MBOX command to handle\n");
13127                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13128                 goto out_no_mqe_complete;
13129         }
13130         spin_unlock_irqrestore(&phba->hbalock, iflags);
13131         mqe = &pmb->u.mqe;
13132         pmbox = (MAILBOX_t *)&pmb->u.mqe;
13133         mbox = phba->mbox;
13134         vport = pmb->vport;
13135
13136         /* Reset heartbeat timer */
13137         phba->last_completion_time = jiffies;
13138         del_timer(&phba->sli.mbox_tmo);
13139
13140         /* Move mbox data to caller's mailbox region, do endian swapping */
13141         if (pmb->mbox_cmpl && mbox)
13142                 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13143
13144         /*
13145          * For mcqe errors, conditionally move a modified error code to
13146          * the mbox so that the error will not be missed.
13147          */
13148         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13149         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13150                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13151                         bf_set(lpfc_mqe_status, mqe,
13152                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
13153         }
13154         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13155                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13156                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13157                                       "MBOX dflt rpi: status:x%x rpi:x%x",
13158                                       mcqe_status,
13159                                       pmbox->un.varWords[0], 0);
13160                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13161                         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13162                         ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13163                         /* Reg_LOGIN of dflt RPI was successful. Now let's get
13164                          * rid of the RPI using the same mbox buffer.
13165                          */
13166                         lpfc_unreg_login(phba, vport->vpi,
13167                                          pmbox->un.varWords[0], pmb);
13168                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13169                         pmb->ctx_buf = mp;
13170                         pmb->ctx_ndlp = ndlp;
13171                         pmb->vport = vport;
13172                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13173                         if (rc != MBX_BUSY)
13174                                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13175                                                 LOG_SLI, "0385 rc should "
13176                                                 "have been MBX_BUSY\n");
13177                         if (rc != MBX_NOT_FINISHED)
13178                                 goto send_current_mbox;
13179                 }
13180         }
13181         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13182         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13183         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13184
13185         /* There is mailbox completion work to do */
13186         spin_lock_irqsave(&phba->hbalock, iflags);
13187         __lpfc_mbox_cmpl_put(phba, pmb);
13188         phba->work_ha |= HA_MBATT;
13189         spin_unlock_irqrestore(&phba->hbalock, iflags);
13190         workposted = true;
13191
13192 send_current_mbox:
13193         spin_lock_irqsave(&phba->hbalock, iflags);
13194         /* Release the mailbox command posting token */
13195         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13196         /* Clearing the active mailbox pointer must be in sync with flag clear */
13197         phba->sli.mbox_active = NULL;
13198         if (bf_get(lpfc_trailer_consumed, mcqe))
13199                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13200         spin_unlock_irqrestore(&phba->hbalock, iflags);
13201         /* Wake up worker thread to post the next pending mailbox command */
13202         lpfc_worker_wake_up(phba);
13203         return workposted;
13204
13205 out_no_mqe_complete:
13206         spin_lock_irqsave(&phba->hbalock, iflags);
13207         if (bf_get(lpfc_trailer_consumed, mcqe))
13208                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13209         spin_unlock_irqrestore(&phba->hbalock, iflags);
13210         return false;
13211 }
13212
13213 /**
13214  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13215  * @phba: Pointer to HBA context object.
13216  * @cqe: Pointer to mailbox completion queue entry.
13217  *
13218  * This routine processes a mailbox completion queue entry; it invokes the
13219  * proper mailbox completion or asynchronous event handling routine
13220  * according to the MCQE's async bit.
13221  *
13222  * Return: true if work posted to worker thread, otherwise false.
13223  **/
13224 static bool
13225 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13226                          struct lpfc_cqe *cqe)
13227 {
13228         struct lpfc_mcqe mcqe;
13229         bool workposted;
13230
13231         cq->CQ_mbox++;
13232
13233         /* Copy the mailbox MCQE and convert endian order as needed */
13234         lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13235
13236         /* Invoke the proper event handling routine */
13237         if (!bf_get(lpfc_trailer_async, &mcqe))
13238                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13239         else
13240                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13241         return workposted;
13242 }
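
/*
 * Editor's note: handlers on this path never decode a CQE in place;
 * they snapshot it first so the DMA'd ring entry can be recycled and
 * endianness is normalized exactly once. A minimal, hypothetical
 * word-wise snapshot of a little-endian entry (lpfc's real helper,
 * lpfc_sli4_pcimem_bcopy, differs in detail); compiled out:
 */
#ifdef LPFC_EDITOR_EXAMPLES
static void example_cqe_snapshot(const __le32 *src, u32 *dst, int words)
{
	int i;

	for (i = 0; i < words; i++)
		dst[i] = le32_to_cpu(src[i]);	/* normalize to CPU order */
}
#endif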
13243
13244 /**
13245  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13246  * @phba: Pointer to HBA context object.
13247  * @cq: Pointer to associated CQ
13248  * @wcqe: Pointer to work-queue completion queue entry.
13249  *
13250  * This routine handles an ELS work-queue completion event.
13251  *
13252  * Return: true if work posted to worker thread, otherwise false.
13253  **/
13254 static bool
13255 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13256                              struct lpfc_wcqe_complete *wcqe)
13257 {
13258         struct lpfc_iocbq *irspiocbq;
13259         unsigned long iflags;
13260         struct lpfc_sli_ring *pring = cq->pring;
13261         int txq_cnt = 0;
13262         int txcmplq_cnt = 0;
13263
13264         /* Check for response status */
13265         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13266                 /* Log the error status */
13267                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13268                                 "0357 ELS CQE error: status=x%x: "
13269                                 "CQE: %08x %08x %08x %08x\n",
13270                                 bf_get(lpfc_wcqe_c_status, wcqe),
13271                                 wcqe->word0, wcqe->total_data_placed,
13272                                 wcqe->parameter, wcqe->word3);
13273         }
13274
13275         /* Get an irspiocbq for later ELS response processing use */
13276         irspiocbq = lpfc_sli_get_iocbq(phba);
13277         if (!irspiocbq) {
13278                 if (!list_empty(&pring->txq))
13279                         txq_cnt++;
13280                 if (!list_empty(&pring->txcmplq))
13281                         txcmplq_cnt++;
13282                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13283                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13284                         "els_txcmplq_cnt=%d\n",
13285                         txq_cnt, phba->iocb_cnt,
13286                         txcmplq_cnt);
13287                 return false;
13288         }
13289
13290         /* Save off the slow-path queue event for work thread to process */
13291         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13292         spin_lock_irqsave(&phba->hbalock, iflags);
13293         list_add_tail(&irspiocbq->cq_event.list,
13294                       &phba->sli4_hba.sp_queue_event);
13295         phba->hba_flag |= HBA_SP_QUEUE_EVT;
13296         spin_unlock_irqrestore(&phba->hbalock, iflags);
13297
13298         return true;
13299 }
13300
13301 /**
13302  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13303  * @phba: Pointer to HBA context object.
13304  * @wcqe: Pointer to work-queue completion queue entry.
13305  *
13306  * This routine handles slow-path WQ entry consumed event by invoking the
13307  * proper WQ release routine to the slow-path WQ.
13308  **/
13309 static void
13310 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13311                              struct lpfc_wcqe_release *wcqe)
13312 {
13313         /* sanity check on queue memory */
13314         if (unlikely(!phba->sli4_hba.els_wq))
13315                 return;
13316         /* Check for the slow-path ELS work queue */
13317         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13318                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13319                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13320         else
13321                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13322                                 "2579 Slow-path wqe consume event carries "
13323                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13324                                 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13325                                 phba->sli4_hba.els_wq->queue_id);
13326 }
13327
13328 /**
13329  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13330  * @phba: Pointer to HBA context object.
13331  * @cq: Pointer to a WQ completion queue.
13332  * @wcqe: Pointer to work-queue completion queue entry.
13333  *
13334  * This routine handles an XRI abort event.
13335  *
13336  * Return: true if work posted to worker thread, otherwise false.
13337  **/
13338 static bool
13339 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13340                                    struct lpfc_queue *cq,
13341                                    struct sli4_wcqe_xri_aborted *wcqe)
13342 {
13343         bool workposted = false;
13344         struct lpfc_cq_event *cq_event;
13345         unsigned long iflags;
13346
13347         switch (cq->subtype) {
13348         case LPFC_IO:
13349                 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13350                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13351                         /* Notify aborted XRI for NVME work queue */
13352                         if (phba->nvmet_support)
13353                                 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13354                 }
13355                 workposted = false;
13356                 break;
13357         case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13358         case LPFC_ELS:
13359                 cq_event = lpfc_cq_event_setup(
13360                         phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13361                 if (!cq_event)
13362                         return false;
13363                 cq_event->hdwq = cq->hdwq;
13364                 spin_lock_irqsave(&phba->hbalock, iflags);
13365                 list_add_tail(&cq_event->list,
13366                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13367                 /* Set the els xri abort event flag */
13368                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13369                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13370                 workposted = true;
13371                 break;
13372         default:
13373                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13374                                 "0603 Invalid CQ subtype %d: "
13375                                 "%08x %08x %08x %08x\n",
13376                                 cq->subtype, wcqe->word0, wcqe->parameter,
13377                                 wcqe->word2, wcqe->word3);
13378                 workposted = false;
13379                 break;
13380         }
13381         return workposted;
13382 }
13383
13384 #define FC_RCTL_MDS_DIAGS       0xF4
13385
13386 /**
13387  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13388  * @phba: Pointer to HBA context object.
13389  * @rcqe: Pointer to receive-queue completion queue entry.
13390  *
13391  * This routine processes a receive-queue completion queue entry.
13392  *
13393  * Return: true if work posted to worker thread, otherwise false.
13394  **/
13395 static bool
13396 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13397 {
13398         bool workposted = false;
13399         struct fc_frame_header *fc_hdr;
13400         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13401         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13402         struct lpfc_nvmet_tgtport *tgtp;
13403         struct hbq_dmabuf *dma_buf;
13404         uint32_t status, rq_id;
13405         unsigned long iflags;
13406
13407         /* sanity check on queue memory */
13408         if (unlikely(!hrq) || unlikely(!drq))
13409                 return workposted;
13410
13411         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13412                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13413         else
13414                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13415         if (rq_id != hrq->queue_id)
13416                 goto out;
13417
13418         status = bf_get(lpfc_rcqe_status, rcqe);
13419         switch (status) {
13420         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13421                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13422                                 "2537 Receive Frame Truncated!!\n");
13423                 /* fall through */
13424         case FC_STATUS_RQ_SUCCESS:
13425                 spin_lock_irqsave(&phba->hbalock, iflags);
13426                 lpfc_sli4_rq_release(hrq, drq);
13427                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13428                 if (!dma_buf) {
13429                         hrq->RQ_no_buf_found++;
13430                         spin_unlock_irqrestore(&phba->hbalock, iflags);
13431                         goto out;
13432                 }
13433                 hrq->RQ_rcv_buf++;
13434                 hrq->RQ_buf_posted--;
13435                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13436
13437                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13438
13439                 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13440                     fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13441                         spin_unlock_irqrestore(&phba->hbalock, iflags);
13442                         /* Handle MDS Loopback frames */
13443                         lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13444                         break;
13445                 }
13446
13447                 /* save off the frame for the work thread to process */
13448                 list_add_tail(&dma_buf->cq_event.list,
13449                               &phba->sli4_hba.sp_queue_event);
13450                 /* Frame received */
13451                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13452                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13453                 workposted = true;
13454                 break;
13455         case FC_STATUS_INSUFF_BUF_FRM_DISC:
13456                 if (phba->nvmet_support) {
13457                         tgtp = phba->targetport->private;
13458                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13459                                         "6402 RQE Error x%x, posted %d err_cnt "
13460                                         "%d: %x %x %x\n",
13461                                         status, hrq->RQ_buf_posted,
13462                                         hrq->RQ_no_posted_buf,
13463                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
13464                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
13465                                         atomic_read(&tgtp->xmt_fcp_release));
13466                 }
13467                 /* fallthrough */
13468
13469         case FC_STATUS_INSUFF_BUF_NEED_BUF:
13470                 hrq->RQ_no_posted_buf++;
13471                 /* Post more buffers if possible */
13472                 spin_lock_irqsave(&phba->hbalock, iflags);
13473                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13474                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13475                 workposted = true;
13476                 break;
13477         }
13478 out:
13479         return workposted;
13480 }
13481
13482 /**
13483  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13484  * @phba: Pointer to HBA context object.
13485  * @cq: Pointer to the completion queue.
13486  * @cqe: Pointer to a completion queue entry.
13487  *
13488  * This routine processes a slow-path work-queue or receive-queue
13489  * completion queue entry.
13490  *
13491  * Return: true if work posted to worker thread, otherwise false.
13492  **/
13493 static bool
13494 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13495                          struct lpfc_cqe *cqe)
13496 {
13497         struct lpfc_cqe cqevt;
13498         bool workposted = false;
13499
13500         /* Copy the work queue CQE and convert endian order if needed */
13501         lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13502
13503         /* Check and process for different type of WCQE and dispatch */
13504         switch (bf_get(lpfc_cqe_code, &cqevt)) {
13505         case CQE_CODE_COMPL_WQE:
13506                 /* Process the WQ/RQ complete event */
13507                 phba->last_completion_time = jiffies;
13508                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13509                                 (struct lpfc_wcqe_complete *)&cqevt);
13510                 break;
13511         case CQE_CODE_RELEASE_WQE:
13512                 /* Process the WQ release event */
13513                 lpfc_sli4_sp_handle_rel_wcqe(phba,
13514                                 (struct lpfc_wcqe_release *)&cqevt);
13515                 break;
13516         case CQE_CODE_XRI_ABORTED:
13517                 /* Process the WQ XRI abort event */
13518                 phba->last_completion_time = jiffies;
13519                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13520                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
13521                 break;
13522         case CQE_CODE_RECEIVE:
13523         case CQE_CODE_RECEIVE_V1:
13524                 /* Process the RQ event */
13525                 phba->last_completion_time = jiffies;
13526                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13527                                 (struct lpfc_rcqe *)&cqevt);
13528                 break;
13529         default:
13530                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13531                                 "0388 Not a valid WCQE code: x%x\n",
13532                                 bf_get(lpfc_cqe_code, &cqevt));
13533                 break;
13534         }
13535         return workposted;
13536 }
13537
13538 /**
13539  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13540  * @phba: Pointer to HBA context object.
13541  * @eqe: Pointer to slow-path event queue entry.
13542  *
13543  * This routine processes an event queue entry from the slow-path event
13544  * queue. It checks the MajorCode and MinorCode to determine whether this
13545  * is a completion event on a completion queue; if not, an error is logged
13546  * and the routine returns. Otherwise, it looks up the corresponding
13547  * completion queue, processes all the entries on that completion queue,
13548  * rearms it, and returns.
13549  *
13550  **/
13551 static void
13552 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13553         struct lpfc_queue *speq)
13554 {
13555         struct lpfc_queue *cq = NULL, *childq;
13556         uint16_t cqid;
13557
13558         /* Get the reference to the corresponding CQ */
13559         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13560
13561         list_for_each_entry(childq, &speq->child_list, list) {
13562                 if (childq->queue_id == cqid) {
13563                         cq = childq;
13564                         break;
13565                 }
13566         }
13567         if (unlikely(!cq)) {
13568                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13569                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13570                                         "0365 Slow-path CQ identifier "
13571                                         "(%d) does not exist\n", cqid);
13572                 return;
13573         }
13574
13575         /* Save EQ associated with this CQ */
13576         cq->assoc_qp = speq;
13577
13578         if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
13579                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13580                                 "0390 Cannot schedule soft IRQ "
13581                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13582                                 cqid, cq->queue_id, raw_smp_processor_id());
13583 }
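
/*
 * Editor's note: rather than walking the CQ in hard-IRQ context, the
 * routine above defers the walk to a work item pinned to the CQ's CPU
 * via queue_work_on(). A minimal, hypothetical sketch of that
 * deferral pattern (compiled out; example_* names are illustrative):
 */
#ifdef LPFC_EDITOR_EXAMPLES
struct example_cq {
	struct work_struct work;
	int chann;		/* CPU the walk should run on */
};

static void example_cq_walk(struct work_struct *work)
{
	struct example_cq *cq = container_of(work, struct example_cq, work);

	/* ... consume completion entries for cq here ... */
	(void)cq;
}

static void example_cq_init(struct example_cq *cq)
{
	INIT_WORK(&cq->work, example_cq_walk);
}

static bool example_cq_kick(struct workqueue_struct *wq,
			    struct example_cq *cq)
{
	/* Returns false if the work was already pending; the walk in
	 * flight will see the new entries, so nothing is lost.
	 */
	return queue_work_on(cq->chann, wq, &cq->work);
}
#endif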
13584
13585 /**
13586  * __lpfc_sli4_process_cq - Process elements of a CQ
13587  * @phba: Pointer to HBA context object.
13588  * @cq: Pointer to CQ to be processed
13589  * @handler: Routine to process each cqe
13590  * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13591  *
13592  * This routine processes completion queue entries in a CQ. While a valid
13593  * queue element is found, the handler is called. During processing checks
13594  * are made for periodic doorbell writes to let the hardware know of
13595  * element consumption.
13596  *
13597  * If the max limit on CQEs to process is hit, or there are no more valid
13598  * entries, the loop stops. If a sufficient number of elements was
13599  * processed, indicating sustained load, a CQ rescheduling delay is set
13600  * rather than rearming and generating another interrupt. A delay of 0
13601  * indicates no rescheduling.
13602  *
13603  * Returns true if work was posted to the worker thread, false otherwise.
13604  **/
13605 static bool
13606 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13607         bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13608                         struct lpfc_cqe *), unsigned long *delay)
13609 {
13610         struct lpfc_cqe *cqe;
13611         bool workposted = false;
13612         int count = 0, consumed = 0;
13613         bool arm = true;
13614
13615         /* default - no reschedule */
13616         *delay = 0;
13617
13618         if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13619                 goto rearm_and_exit;
13620
13621         /* Process all the entries to the CQ */
13622         cq->q_flag = 0;
13623         cqe = lpfc_sli4_cq_get(cq);
13624         while (cqe) {
13625                 workposted |= handler(phba, cq, cqe);
13626                 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13627
13628                 consumed++;
13629                 if (!(++count % cq->max_proc_limit))
13630                         break;
13631
13632                 if (!(count % cq->notify_interval)) {
13633                         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13634                                                 LPFC_QUEUE_NOARM);
13635                         consumed = 0;
13636                 }
13637
13638                 if (count == LPFC_NVMET_CQ_NOTIFY)
13639                         cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13640
13641                 cqe = lpfc_sli4_cq_get(cq);
13642         }
13643         if (count >= phba->cfg_cq_poll_threshold) {
13644                 *delay = 1;
13645                 arm = false;
13646         }
13647
13648         /* Track the max number of CQEs processed in 1 EQ */
13649         if (count > cq->CQ_max_cqe)
13650                 cq->CQ_max_cqe = count;
13651
13652         cq->assoc_qp->EQ_cqe_cnt += count;
13653
13654         /* Catch the no cq entry condition */
13655         if (unlikely(count == 0))
13656                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13657                                 "0369 No entry from completion queue "
13658                                 "qid=%d\n", cq->queue_id);
13659
13660         cq->queue_claimed = 0;
13661
13662 rearm_and_exit:
13663         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13664                         arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13665
13666         return workposted;
13667 }
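
/*
 * Editor's note: __lpfc_sli4_process_cq() is generic over the per-CQE
 * handler. A hedged sketch of a conforming handler follows; the name
 * lpfc_sli4_example_handle_cqe is hypothetical. The contract is: decode
 * one CQE and return true only if work was posted for the worker thread.
 */
#if 0
static bool
lpfc_sli4_example_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                             struct lpfc_cqe *cqe)
{
        /* Decode the CQE here. The caller consumes the entry and writes
         * the CQ doorbell every cq->notify_interval entries.
         */
        return false;
}

/* invoked as:
 *      workposted = __lpfc_sli4_process_cq(phba, cq,
 *                                          lpfc_sli4_example_handle_cqe,
 *                                          &delay);
 */
#endif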
13668
13669 /**
13670  * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
13671  * @cq: pointer to CQ to process
13672  *
13673  * This routine calls the cq processing routine with a handler specific
13674  * to the type of queue bound to it.
13675  *
13676  * The CQ routine returns two values: the first is the calling status,
13677  * which indicates whether work was queued to the background discovery
13678  * thread. If true, the routine should wake up the discovery thread;
13679  * the second is the delay parameter. If non-zero, rather than rearming
13680  * the CQ and taking yet another interrupt, the CQ handler is requeued
13681  * so that it runs in a subsequent polling pass. The value of the delay
13682  * indicates when to reschedule it.
13683  **/
13684 static void
13685 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13686 {
13687         struct lpfc_hba *phba = cq->phba;
13688         unsigned long delay;
13689         bool workposted = false;
13690
13691         /* Process and rearm the CQ */
13692         switch (cq->type) {
13693         case LPFC_MCQ:
13694                 workposted |= __lpfc_sli4_process_cq(phba, cq,
13695                                                 lpfc_sli4_sp_handle_mcqe,
13696                                                 &delay);
13697                 break;
13698         case LPFC_WCQ:
13699                 if (cq->subtype == LPFC_IO)
13700                         workposted |= __lpfc_sli4_process_cq(phba, cq,
13701                                                 lpfc_sli4_fp_handle_cqe,
13702                                                 &delay);
13703                 else
13704                         workposted |= __lpfc_sli4_process_cq(phba, cq,
13705                                                 lpfc_sli4_sp_handle_cqe,
13706                                                 &delay);
13707                 break;
13708         default:
13709                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13710                                 "0370 Invalid completion queue type (%d)\n",
13711                                 cq->type);
13712                 return;
13713         }
13714
13715         if (delay) {
13716                 if (!queue_delayed_work_on(cq->chann, phba->wq,
13717                                            &cq->sched_spwork, delay))
13718                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13719                                 "0394 Cannot schedule soft IRQ "
13720                                 "for cqid=%d on CPU %d\n",
13721                                 cq->queue_id, cq->chann);
13722         }
13723
13724         /* wake up the worker thread if there is work to be done */
13725         if (workposted)
13726                 lpfc_worker_wake_up(phba);
13727 }
13728
13729 /**
13730  * lpfc_sli4_sp_process_cq - slow-path work handler when started by
13731  *   interrupt
13732  * @work: pointer to work element
13733  *
13734  * Translates the work element back to its queue and calls the slow-path handler.
13735  **/
13736 static void
13737 lpfc_sli4_sp_process_cq(struct work_struct *work)
13738 {
13739         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13740
13741         __lpfc_sli4_sp_process_cq(cq);
13742 }
13743
13744 /**
13745  * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
13746  * @work: pointer to work element
13747  *
13748  * Translates the work element back to its queue and calls the slow-path handler.
13749  **/
13750 static void
13751 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13752 {
13753         struct lpfc_queue *cq = container_of(to_delayed_work(work),
13754                                         struct lpfc_queue, sched_spwork);
13755
13756         __lpfc_sli4_sp_process_cq(cq);
13757 }
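
/*
 * Editor's note: the two thin wrappers above exist because an immediate
 * work_struct and a delayed_work hand different container pointers to the
 * same handler. A sketch of how this file binds and schedules them
 * (mirrors lpfc_sli4_queue_alloc() and the EQE handlers later in this
 * file); 'cq', 'phba' and 'delay' are assumed to be in scope:
 */
#if 0
        INIT_WORK(&cq->spwork, lpfc_sli4_sp_process_cq);
        INIT_DELAYED_WORK(&cq->sched_spwork, lpfc_sli4_dly_sp_process_cq);

        /* immediate: process the CQ in workqueue context on cq->chann */
        queue_work_on(cq->chann, phba->wq, &cq->spwork);

        /* deferred: re-poll the CQ after 'delay' jiffies instead of
         * rearming it and taking another interrupt
         */
        queue_delayed_work_on(cq->chann, phba->wq, &cq->sched_spwork, delay);
#endif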
13758
13759 /**
13760  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13761  * @phba: Pointer to HBA context object.
13762  * @cq: Pointer to associated CQ
13763  * @wcqe: Pointer to work-queue completion queue entry.
13764  *
13765  * This routine processes a fast-path work-queue completion entry from a
13766  * fast-path event queue for FCP command response completion.
13767  **/
13768 static void
13769 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13770                              struct lpfc_wcqe_complete *wcqe)
13771 {
13772         struct lpfc_sli_ring *pring = cq->pring;
13773         struct lpfc_iocbq *cmdiocbq;
13774         struct lpfc_iocbq irspiocbq;
13775         unsigned long iflags;
13776
13777         /* Check for response status */
13778         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13779                 /* If resource errors reported from HBA, reduce queue
13780                  * depth of the SCSI device.
13781                  */
13782                 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
13783                      IOSTAT_LOCAL_REJECT) &&
13784                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
13785                      IOERR_NO_RESOURCES))
13786                         phba->lpfc_rampdown_queue_depth(phba);
13787
13788                 /* Log the error status */
13789                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13790                                 "0373 FCP CQE error: status=x%x: "
13791                                 "CQE: %08x %08x %08x %08x\n",
13792                                 bf_get(lpfc_wcqe_c_status, wcqe),
13793                                 wcqe->word0, wcqe->total_data_placed,
13794                                 wcqe->parameter, wcqe->word3);
13795         }
13796
13797         /* Look up the FCP command IOCB and create pseudo response IOCB */
13798         spin_lock_irqsave(&pring->ring_lock, iflags);
13799         pring->stats.iocb_event++;
13800         spin_unlock_irqrestore(&pring->ring_lock, iflags);
13801         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13802                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13803         if (unlikely(!cmdiocbq)) {
13804                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13805                                 "0374 FCP complete with no corresponding "
13806                                 "cmdiocb: iotag (%d)\n",
13807                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13808                 return;
13809         }
13810 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13811         cmdiocbq->isr_timestamp = cq->isr_timestamp;
13812 #endif
13813         if (cmdiocbq->iocb_cmpl == NULL) {
13814                 if (cmdiocbq->wqe_cmpl) {
13815                         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13816                                 spin_lock_irqsave(&phba->hbalock, iflags);
13817                                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13818                                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13819                         }
13820
13821                         /* Pass the cmd_iocb and the wcqe to the upper layer */
13822                         (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13823                         return;
13824                 }
13825                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13826                                 "0375 FCP cmdiocb has no callback function, "
13827                                 "iotag: (%d)\n",
13828                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13829                 return;
13830         }
13831
13832         /* Fake the irspiocb and copy necessary response information */
13833         lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13834
13835         if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13836                 spin_lock_irqsave(&phba->hbalock, iflags);
13837                 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13838                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13839         }
13840
13841         /* Pass the cmd_iocb and the rsp state to the upper layer */
13842         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13843 }
13844
13845 /**
13846  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13847  * @phba: Pointer to HBA context object.
13848  * @cq: Pointer to completion queue.
13849  * @wcqe: Pointer to work-queue completion queue entry.
13850  *
13851  * This routine handles a fast-path WQ entry consumed event by invoking the
13852  * proper WQ release routine on the matching fast-path work queue.
13853  **/
13854 static void
13855 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13856                              struct lpfc_wcqe_release *wcqe)
13857 {
13858         struct lpfc_queue *childwq;
13859         bool wqid_matched = false;
13860         uint16_t hba_wqid;
13861
13862         /* Check for fast-path FCP work queue release */
13863         hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13864         list_for_each_entry(childwq, &cq->child_list, list) {
13865                 if (childwq->queue_id == hba_wqid) {
13866                         lpfc_sli4_wq_release(childwq,
13867                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13868                         if (childwq->q_flag & HBA_NVMET_WQFULL)
13869                                 lpfc_nvmet_wqfull_process(phba, childwq);
13870                         wqid_matched = true;
13871                         break;
13872                 }
13873         }
13874         /* Report warning log message if no match found */
13875         if (!wqid_matched)
13876                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13877                                 "2580 Fast-path wqe consume event carries "
13878                                 "mismatched qid: wcqe-qid=x%x\n", hba_wqid);
13879 }
13880
13881 /**
13882  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13883  * @phba: Pointer to HBA context object.
13884  * @cq: Pointer to the completion queue.
13885  * @rcqe: Pointer to receive-queue completion queue entry.
13886  *
13887  * This routine processes a receive-queue completion queue entry.
13888  * Return: true if work posted to worker thread, otherwise false.
13889  **/
13890 static bool
13891 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13892                             struct lpfc_rcqe *rcqe)
13893 {
13894         bool workposted = false;
13895         struct lpfc_queue *hrq;
13896         struct lpfc_queue *drq;
13897         struct rqb_dmabuf *dma_buf;
13898         struct fc_frame_header *fc_hdr;
13899         struct lpfc_nvmet_tgtport *tgtp;
13900         uint32_t status, rq_id;
13901         unsigned long iflags;
13902         uint32_t fctl, idx;
13903
13904         if ((phba->nvmet_support == 0) ||
13905             (phba->sli4_hba.nvmet_cqset == NULL))
13906                 return workposted;
13907
13908         idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13909         hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13910         drq = phba->sli4_hba.nvmet_mrq_data[idx];
13911
13912         /* sanity check on queue memory */
13913         if (unlikely(!hrq) || unlikely(!drq))
13914                 return workposted;
13915
13916         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13917                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13918         else
13919                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13920
13921         if ((phba->nvmet_support == 0) ||
13922             (rq_id != hrq->queue_id))
13923                 return workposted;
13924
13925         status = bf_get(lpfc_rcqe_status, rcqe);
13926         switch (status) {
13927         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13928                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13929                                 "6126 Receive Frame Truncated!!\n");
13930                 /* fall through */
13931         case FC_STATUS_RQ_SUCCESS:
13932                 spin_lock_irqsave(&phba->hbalock, iflags);
13933                 lpfc_sli4_rq_release(hrq, drq);
13934                 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13935                 if (!dma_buf) {
13936                         hrq->RQ_no_buf_found++;
13937                         spin_unlock_irqrestore(&phba->hbalock, iflags);
13938                         goto out;
13939                 }
13940                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13941                 hrq->RQ_rcv_buf++;
13942                 hrq->RQ_buf_posted--;
13943                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13944
13945                 /* Just some basic sanity checks on FCP Command frame */
13946                 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13947                         fc_hdr->fh_f_ctl[1] << 8 |
13948                         fc_hdr->fh_f_ctl[2]);
13949                 if (((fctl &
13950                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13951                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13952                     (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13953                         goto drop;
13954
13955                 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13956                         dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13957                         lpfc_nvmet_unsol_fcp_event(
13958                                 phba, idx, dma_buf, cq->isr_timestamp,
13959                                 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
13960                         return false;
13961                 }
13962 drop:
13963                 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
13964                 break;
13965         case FC_STATUS_INSUFF_BUF_FRM_DISC:
13966                 if (phba->nvmet_support) {
13967                         tgtp = phba->targetport->private;
13968                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13969                                         "6401 RQE Error x%x, posted %d err_cnt "
13970                                         "%d: %x %x %x\n",
13971                                         status, hrq->RQ_buf_posted,
13972                                         hrq->RQ_no_posted_buf,
13973                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
13974                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
13975                                         atomic_read(&tgtp->xmt_fcp_release));
13976                 }
13977                 /* fall through */
13978
13979         case FC_STATUS_INSUFF_BUF_NEED_BUF:
13980                 hrq->RQ_no_posted_buf++;
13981                 /* Post more buffers if possible */
13982                 break;
13983         }
13984 out:
13985         return workposted;
13986 }
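
/*
 * Editor's note: the F_CTL sanity check above requires FIRST_SEQ, END_SEQ
 * and SEQ_INIT to all be set (a single-frame sequence that transfers
 * sequence initiative). A sketch of the 24-bit F_CTL assembly, factored
 * into a hypothetical helper purely for illustration:
 */
#if 0
static uint32_t
lpfc_fc_hdr_fctl(const struct fc_frame_header *fc_hdr)
{
        /* fh_f_ctl is a 3-byte big-endian field in the FC frame header */
        return (fc_hdr->fh_f_ctl[0] << 16) |
               (fc_hdr->fh_f_ctl[1] << 8) |
                fc_hdr->fh_f_ctl[2];
}
#endif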
13987
13988 /**
13989  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
13990  * @phba: adapter with cq
13991  * @cq: Pointer to the completion queue.
13992  * @cqe: Pointer to fast-path completion queue entry.
13993  *
13994  * This routine processes a fast-path work-queue completion entry from a
13995  * fast-path event queue for FCP command response completion.
13996  *
13997  * Return: true if work posted to worker thread, otherwise false.
13998  **/
13999 static bool
14000 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14001                          struct lpfc_cqe *cqe)
14002 {
14003         struct lpfc_wcqe_release wcqe;
14004         bool workposted = false;
14005
14006         /* Copy the work queue CQE and convert endian order if needed */
14007         lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14008
14009         /* Check and process for different type of WCQE and dispatch */
14010         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14011         case CQE_CODE_COMPL_WQE:
14012         case CQE_CODE_NVME_ERSP:
14013                 cq->CQ_wq++;
14014                 /* Process the WQ complete event */
14015                 phba->last_completion_time = jiffies;
14016                 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14017                         lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14018                                 (struct lpfc_wcqe_complete *)&wcqe);
14019                 break;
14020         case CQE_CODE_RELEASE_WQE:
14021                 cq->CQ_release_wqe++;
14022                 /* Process the WQ release event */
14023                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14024                                 (struct lpfc_wcqe_release *)&wcqe);
14025                 break;
14026         case CQE_CODE_XRI_ABORTED:
14027                 cq->CQ_xri_aborted++;
14028                 /* Process the WQ XRI abort event */
14029                 phba->last_completion_time = jiffies;
14030                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14031                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
14032                 break;
14033         case CQE_CODE_RECEIVE_V1:
14034         case CQE_CODE_RECEIVE:
14035                 phba->last_completion_time = jiffies;
14036                 if (cq->subtype == LPFC_NVMET) {
14037                         workposted = lpfc_sli4_nvmet_handle_rcqe(
14038                                 phba, cq, (struct lpfc_rcqe *)&wcqe);
14039                 }
14040                 break;
14041         default:
14042                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14043                                 "0144 Not a valid CQE code: x%x\n",
14044                                 bf_get(lpfc_wcqe_c_code, &wcqe));
14045                 break;
14046         }
14047         return workposted;
14048 }
14049
14050 /**
14051  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14052  * @phba: Pointer to HBA context object.
14053  * @eq: Pointer to the fast-path event queue.
14054  * @eqe: Pointer to fast-path event queue entry.
14055  *
14056  * This routine processes an event queue entry from a fast-path event
14057  * queue. It first checks the MajorCode to confirm that the entry reports
14058  * a completion event; if not, an error is logged and the routine returns.
14059  * Otherwise it resolves the corresponding completion queue and schedules
14060  * that queue's work item so its entries are processed and the CQ rearmed.
14061  **/
14062 static void
14063 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14064                          struct lpfc_eqe *eqe)
14065 {
14066         struct lpfc_queue *cq = NULL;
14067         uint32_t qidx = eq->hdwq;
14068         uint16_t cqid, id;
14069
14070         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14071                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14072                                 "0366 Not a valid completion "
14073                                 "event: majorcode=x%x, minorcode=x%x\n",
14074                                 bf_get_le32(lpfc_eqe_major_code, eqe),
14075                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
14076                 return;
14077         }
14078
14079         /* Get the reference to the corresponding CQ */
14080         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14081
14082         /* Use the fast lookup method first */
14083         if (cqid <= phba->sli4_hba.cq_max) {
14084                 cq = phba->sli4_hba.cq_lookup[cqid];
14085                 if (cq)
14086                         goto work_cq;
14087         }
14088
14089         /* Next check for NVMET completion */
14090         if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14091                 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14092                 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14093                         /* Process NVMET unsol rcv */
14094                         cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14095                         goto process_cq;
14096                 }
14097         }
14098
14099         if (phba->sli4_hba.nvmels_cq &&
14100             (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14101                 /* Process NVME unsol rcv */
14102                 cq = phba->sli4_hba.nvmels_cq;
14103         }
14104
14105         /* Otherwise this is a Slow path event */
14106         if (cq == NULL) {
14107                 lpfc_sli4_sp_handle_eqe(phba, eqe,
14108                                         phba->sli4_hba.hdwq[qidx].hba_eq);
14109                 return;
14110         }
14111
14112 process_cq:
14113         if (unlikely(cqid != cq->queue_id)) {
14114                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14115                                 "0368 Mismatched fast-path completion "
14116                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14117                                 cqid, cq->queue_id);
14118                 return;
14119         }
14120
14121 work_cq:
14122 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14123         if (phba->ktime_on)
14124                 cq->isr_timestamp = ktime_get_ns();
14125         else
14126                 cq->isr_timestamp = 0;
14127 #endif
14128         if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
14129                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14130                                 "0363 Cannot schedule soft IRQ "
14131                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14132                                 cqid, cq->queue_id, raw_smp_processor_id());
14133 }
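
/*
 * Editor's note: the routine above resolves a CQ in four steps, cheapest
 * first: (1) an O(1) cq_lookup[] table indexed by cqid, built at queue
 * setup and valid up to sli4_hba.cq_max; (2) a range check against the
 * contiguous block of NVMET CQ-set ids; (3) a direct compare against the
 * NVME LS CQ id; (4) anything else is treated as a slow-path event and is
 * handed to lpfc_sli4_sp_handle_eqe().
 */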
14134
14135 /**
14136  * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
14137  * @cq: Pointer to CQ to be processed
14138  *
14139  * This routine calls the cq processing routine with the handler for
14140  * fast path CQEs.
14141  *
14142  * The CQ routine returns two values: the first is the calling status,
14143  * which indicates whether work was queued to the background discovery
14144  * thread. If true, the routine should wake up the discovery thread;
14145  * the second is the delay parameter. If non-zero, rather than rearming
14146  * the CQ and taking yet another interrupt, the CQ handler is requeued
14147  * so that it runs in a subsequent polling pass. The value of the delay
14148  * indicates when to reschedule it.
14149  **/
14150 static void
14151 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14152 {
14153         struct lpfc_hba *phba = cq->phba;
14154         unsigned long delay;
14155         bool workposted = false;
14156
14157         /* process and rearm the CQ */
14158         workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14159                                              &delay);
14160
14161         if (delay) {
14162                 if (!queue_delayed_work_on(cq->chann, phba->wq,
14163                                            &cq->sched_irqwork, delay))
14164                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14165                                 "0367 Cannot schedule soft IRQ "
14166                                 "for cqid=%d on CPU %d\n",
14167                                 cq->queue_id, cq->chann);
14168         }
14169
14170         /* wake up the worker thread if there is work to be done */
14171         if (workposted)
14172                 lpfc_worker_wake_up(phba);
14173 }
14174
14175 /**
14176  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14177  *   interrupt
14178  * @work: pointer to work element
14179  *
14180  * Translates the work element back to its queue and calls the fast-path handler.
14181  **/
14182 static void
14183 lpfc_sli4_hba_process_cq(struct work_struct *work)
14184 {
14185         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14186
14187         __lpfc_sli4_hba_process_cq(cq);
14188 }
14189
14190 /**
14191  * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14192  * @work: pointer to work element
14193  *
14194  * Translates the work element back to its queue and calls the fast-path handler.
14195  **/
14196 static void
14197 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14198 {
14199         struct lpfc_queue *cq = container_of(to_delayed_work(work),
14200                                         struct lpfc_queue, sched_irqwork);
14201
14202         __lpfc_sli4_hba_process_cq(cq);
14203 }
14204
14205 /**
14206  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14207  * @irq: Interrupt number.
14208  * @dev_id: The device context pointer.
14209  *
14210  * This function is directly called from the PCI layer as an interrupt
14211  * service routine when device with SLI-4 interface spec is enabled with
14212  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14213  * ring event in the HBA. However, when the device is enabled with either
14214  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14215  * device-level interrupt handler. When the PCI slot is in error recovery
14216  * or the HBA is undergoing initialization, the interrupt handler will not
14217  * process the interrupt. SCSI FCP fast-path ring events are handled in
14218  * interrupt context. This function is called without any lock held; it
14219  * takes the hbalock to access and update SLI data structures. Note that
14220  * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
14221  * equal to the FCP CQ index.
14222  *
14223  * The link attention and ELS ring attention events are handled
14224  * by the worker thread. The interrupt handler signals the worker thread
14225  * and returns for these events. This function is called without any lock
14226  * held. It gets the hbalock to access and update SLI data structures.
14227  *
14228  * This function returns IRQ_HANDLED when interrupt is handled else it
14229  * returns IRQ_NONE.
14230  **/
14231 irqreturn_t
14232 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14233 {
14234         struct lpfc_hba *phba;
14235         struct lpfc_hba_eq_hdl *hba_eq_hdl;
14236         struct lpfc_queue *fpeq;
14237         unsigned long iflag;
14238         int ecount = 0;
14239         int hba_eqidx;
14240         struct lpfc_eq_intr_info *eqi;
14241         uint32_t icnt;
14242
14243         /* Get the driver's phba structure from the dev_id */
14244         hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14245         phba = hba_eq_hdl->phba;
14246         hba_eqidx = hba_eq_hdl->idx;
14247
14248         if (unlikely(!phba))
14249                 return IRQ_NONE;
14250         if (unlikely(!phba->sli4_hba.hdwq))
14251                 return IRQ_NONE;
14252
14253         /* Get to the EQ struct associated with this vector */
14254         fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14255         if (unlikely(!fpeq))
14256                 return IRQ_NONE;
14257
14258         /* Check device state for handling interrupt */
14259         if (unlikely(lpfc_intr_state_check(phba))) {
14260                 /* Check again for link_state with lock held */
14261                 spin_lock_irqsave(&phba->hbalock, iflag);
14262                 if (phba->link_state < LPFC_LINK_DOWN)
14263                         /* Flush, clear interrupt, and rearm the EQ */
14264                         lpfc_sli4_eqcq_flush(phba, fpeq);
14265                 spin_unlock_irqrestore(&phba->hbalock, iflag);
14266                 return IRQ_NONE;
14267         }
14268
14269         eqi = phba->sli4_hba.eq_info;
14270         icnt = this_cpu_inc_return(eqi->icnt);
14271         fpeq->last_cpu = raw_smp_processor_id();
14272
14273         if (icnt > LPFC_EQD_ISR_TRIGGER &&
14274             phba->cfg_irq_chann == 1 &&
14275             phba->cfg_auto_imax &&
14276             fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14277             phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14278                 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14279
14280         /* process and rearm the EQ */
14281         ecount = lpfc_sli4_process_eq(phba, fpeq);
14282
14283         if (unlikely(ecount == 0)) {
14284                 fpeq->EQ_no_entry++;
14285                 if (phba->intr_type == MSIX)
14286                         /* Dedicated MSI-X vector: an empty EQ is unexpected */
14287                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14288                                         "0358 MSI-X interrupt with no EQE\n");
14289                 else
14290                         /* Shared (non MSI-X) interrupt: EQE may belong elsewhere */
14291                         return IRQ_NONE;
14292         }
14293
14294         return IRQ_HANDLED;
14295 } /* lpfc_sli4_hba_intr_handler */
14296
14297 /**
14298  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14299  * @irq: Interrupt number.
14300  * @dev_id: The device context pointer.
14301  *
14302  * This function is the device-level interrupt handler to device with SLI-4
14303  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14304  * interrupt mode is enabled and there is an event in the HBA which requires
14305  * driver attention. This function invokes the slow-path interrupt attention
14306  * handling function and fast-path interrupt attention handling function in
14307  * turn to process the relevant HBA attention events. This function is called
14308  * without any lock held. It gets the hbalock to access and update SLI data
14309  * structures.
14310  *
14311  * This function returns IRQ_HANDLED when interrupt is handled, else it
14312  * returns IRQ_NONE.
14313  **/
14314 irqreturn_t
14315 lpfc_sli4_intr_handler(int irq, void *dev_id)
14316 {
14317         struct lpfc_hba  *phba;
14318         irqreturn_t hba_irq_rc;
14319         bool hba_handled = false;
14320         int qidx;
14321
14322         /* Get the driver's phba structure from the dev_id */
14323         phba = (struct lpfc_hba *)dev_id;
14324
14325         if (unlikely(!phba))
14326                 return IRQ_NONE;
14327
14328         /*
14329          * Invoke fast-path host attention interrupt handling as appropriate.
14330          */
14331         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14332                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14333                                         &phba->sli4_hba.hba_eq_hdl[qidx]);
14334                 if (hba_irq_rc == IRQ_HANDLED)
14335                         hba_handled = true;
14336         }
14337
14338         return hba_handled ? IRQ_HANDLED : IRQ_NONE;
14339 } /* lpfc_sli4_intr_handler */
14340
14341 /**
14342  * lpfc_sli4_queue_free - free a queue structure and associated memory
14343  * @queue: The queue structure to free.
14344  *
14345  * This function frees a queue structure and the DMAable memory used for
14346  * the host resident queue. This function must be called after destroying the
14347  * queue on the HBA.
14348  **/
14349 void
14350 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14351 {
14352         struct lpfc_dmabuf *dmabuf;
14353
14354         if (!queue)
14355                 return;
14356
14357         if (!list_empty(&queue->wq_list))
14358                 list_del(&queue->wq_list);
14359
14360         while (!list_empty(&queue->page_list)) {
14361                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14362                                  list);
14363                 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14364                                   dmabuf->virt, dmabuf->phys);
14365                 kfree(dmabuf);
14366         }
14367         if (queue->rqbp) {
14368                 lpfc_free_rq_buffer(queue->phba, queue);
14369                 kfree(queue->rqbp);
14370         }
14371
14372         if (!list_empty(&queue->cpu_list))
14373                 list_del(&queue->cpu_list);
14374
14375         kfree(queue);
14377 }
14378
14379 /**
14380  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14381  * @phba: The HBA that this queue is being created on.
14382  * @page_size: The size of a queue page
14383  * @entry_size: The size of each queue entry for this queue.
14384  * @entry_count: The number of entries that this queue will handle.
14385  * @cpu: The cpu that will primarily utilize this queue.
14386  *
14387  * This function allocates a queue structure and the DMAable memory used for
14388  * the host resident queue. This function must be called before creating the
14389  * queue on the HBA.
14390  **/
14391 struct lpfc_queue *
14392 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14393                       uint32_t entry_size, uint32_t entry_count, int cpu)
14394 {
14395         struct lpfc_queue *queue;
14396         struct lpfc_dmabuf *dmabuf;
14397         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14398         uint16_t x, pgcnt;
14399
14400         if (!phba->sli4_hba.pc_sli4_params.supported)
14401                 hw_page_size = page_size;
14402
14403         pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14404
14405         /* If needed, adjust page count to match the max the adapter supports */
14406         if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14407                 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14408
14409         queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14410                              GFP_KERNEL, cpu_to_node(cpu));
14411         if (!queue)
14412                 return NULL;
14413
14414         INIT_LIST_HEAD(&queue->list);
14415         INIT_LIST_HEAD(&queue->wq_list);
14416         INIT_LIST_HEAD(&queue->wqfull_list);
14417         INIT_LIST_HEAD(&queue->page_list);
14418         INIT_LIST_HEAD(&queue->child_list);
14419         INIT_LIST_HEAD(&queue->cpu_list);
14420
14421         /* Set queue parameters now.  If the system cannot provide memory
14422          * resources, the free routine needs to know what was allocated.
14423          */
14424         queue->page_count = pgcnt;
14425         queue->q_pgs = (void **)&queue[1];
14426         queue->entry_cnt_per_pg = hw_page_size / entry_size;
14427         queue->entry_size = entry_size;
14428         queue->entry_count = entry_count;
14429         queue->page_size = hw_page_size;
14430         queue->phba = phba;
14431
14432         for (x = 0; x < queue->page_count; x++) {
14433                 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14434                                       dev_to_node(&phba->pcidev->dev));
14435                 if (!dmabuf)
14436                         goto out_fail;
14437                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14438                                                   hw_page_size, &dmabuf->phys,
14439                                                   GFP_KERNEL);
14440                 if (!dmabuf->virt) {
14441                         kfree(dmabuf);
14442                         goto out_fail;
14443                 }
14444                 dmabuf->buffer_tag = x;
14445                 list_add_tail(&dmabuf->list, &queue->page_list);
14446                 /* use lpfc_sli4_qe to index a particular entry in this page */
14447                 queue->q_pgs[x] = dmabuf->virt;
14448         }
14449         INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14450         INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14451         INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14452         INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14453
14454         /* notify_interval will be set during q creation */
14455
14456         return queue;
14457 out_fail:
14458         lpfc_sli4_queue_free(queue);
14459         return NULL;
14460 }
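
/*
 * Editor's note: a minimal allocation/teardown sketch for this API,
 * modeled on the EQ setup path in lpfc_init.c. The sizes come from the
 * driver's own sli4_hba fields; 'cpu' and 'imax' are assumed to be
 * caller-supplied. Host memory must be undone with lpfc_sli4_queue_free()
 * if creation on the HBA fails.
 */
#if 0
        struct lpfc_queue *eq;

        eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
                                   phba->sli4_hba.eq_esize,
                                   phba->sli4_hba.eq_ecount, cpu);
        if (!eq)
                return -ENOMEM;
        if (lpfc_eq_create(phba, eq, imax)) {
                /* nothing was created on the HBA; free host memory only */
                lpfc_sli4_queue_free(eq);
                return -ENXIO;
        }
#endif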
14461
14462 /**
14463  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14464  * @phba: HBA structure that indicates port to create a queue on.
14465  * @pci_barset: PCI BAR set flag.
14466  *
14467  * This function returns the host memory address to which the specified
14468  * PCI BAR set was previously iomapped by the driver. The returned host
14469  * memory address can be NULL.
14470  */
14471 static void __iomem *
14472 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14473 {
14474         if (!phba->pcidev)
14475                 return NULL;
14476
14477         switch (pci_barset) {
14478         case WQ_PCI_BAR_0_AND_1:
14479                 return phba->pci_bar0_memmap_p;
14480         case WQ_PCI_BAR_2_AND_3:
14481                 return phba->pci_bar2_memmap_p;
14482         case WQ_PCI_BAR_4_AND_5:
14483                 return phba->pci_bar4_memmap_p;
14484         default:
14485                 break;
14486         }
14487         return NULL;
14488 }
14489
14490 /**
14491  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14492  * @phba: HBA structure that EQs are on.
14493  * @startq: The starting EQ index to modify
14494  * @numq: The number of EQs (consecutive indexes) to modify
14495  * @usdelay: amount of delay
14496  *
14497  * This function revises the EQ delay on 1 or more EQs. The EQ delay
14498  * is set either by writing to a register (if supported by the SLI Port)
14499  * or by mailbox command. The mailbox command allows several EQs to be
14500  * updated at once.
14501  *
14502  * The @phba struct is used to send a mailbox command to HBA. The @startq
14503  * is used to get the starting EQ index to change. The @numq value is
14504  * used to specify how many consecutive EQ indexes, starting at EQ index,
14505  * are to be changed. This function is synchronous; it polls for any
14506  * mailbox command to finish before returning.
14507  *
14508  * This function returns no value. Failures to allocate the mailbox
14509  * buffer or to issue the mailbox command are reported in the driver
14510  * log. Note that on a mailbox failure, some EQs may already have had
14511  * their delay multiplier changed.
14512  **/
14513 void
14514 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14515                          uint32_t numq, uint32_t usdelay)
14516 {
14517         struct lpfc_mbx_modify_eq_delay *eq_delay;
14518         LPFC_MBOXQ_t *mbox;
14519         struct lpfc_queue *eq;
14520         int cnt = 0, rc, length;
14521         uint32_t shdr_status, shdr_add_status;
14522         uint32_t dmult;
14523         int qidx;
14524         union lpfc_sli4_cfg_shdr *shdr;
14525
14526         if (startq >= phba->cfg_irq_chann)
14527                 return;
14528
14529         if (usdelay > 0xFFFF) {
14530                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14531                                 "6429 usdelay %d too large. Scaled down to "
14532                                 "0xFFFF.\n", usdelay);
14533                 usdelay = 0xFFFF;
14534         }
14535
14536         /* set values by EQ_DELAY register if supported */
14537         if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14538                 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14539                         eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14540                         if (!eq)
14541                                 continue;
14542
14543                         lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
14544
14545                         if (++cnt >= numq)
14546                                 break;
14547                 }
14548                 return;
14549         }
14550
14551         /* Otherwise, set values by mailbox cmd */
14552
14553         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14554         if (!mbox) {
14555                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14556                                 "6428 Failed allocating mailbox cmd buffer."
14557                                 " EQ delay was not set.\n");
14558                 return;
14559         }
14560         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14561                   sizeof(struct lpfc_sli4_cfg_mhdr));
14562         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14563                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14564                          length, LPFC_SLI4_MBX_EMBED);
14565         eq_delay = &mbox->u.mqe.un.eq_delay;
14566
14567         /* Calculate delay multiplier from the requested delay in microseconds */
14568         dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14569         if (dmult)
14570                 dmult--;
14571         if (dmult > LPFC_DMULT_MAX)
14572                 dmult = LPFC_DMULT_MAX;
14573
14574         for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14575                 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14576                 if (!eq)
14577                         continue;
14578                 eq->q_mode = usdelay;
14579                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14580                 eq_delay->u.request.eq[cnt].phase = 0;
14581                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14582
14583                 if (++cnt >= numq)
14584                         break;
14585         }
14586         eq_delay->u.request.num_eq = cnt;
14587
14588         mbox->vport = phba->pport;
14589         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14590         mbox->ctx_buf = NULL;
14591         mbox->ctx_ndlp = NULL;
14592         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14593         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14594         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14595         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14596         if (shdr_status || shdr_add_status || rc) {
14597                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14598                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
14599                                 "status x%x add_status x%x, mbx status x%x\n",
14600                                 shdr_status, shdr_add_status, rc);
14601         }
14602         mempool_free(mbox, phba->mbox_mem_pool);
14603         return;
14604 }
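
/*
 * Editor's note: a hedged usage sketch requesting roughly 16us of
 * coalescing on every EQ; the value is illustrative only. Internally the
 * routine converts usdelay to a delay multiplier,
 * dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC, decrements it
 * if non-zero, caps it at LPFC_DMULT_MAX, and applies it either through
 * the EQ_DELAY register or a single MODIFY_EQ_DELAY mailbox command.
 */
#if 0
        lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);
#endif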
14605
14606 /**
14607  * lpfc_eq_create - Create an Event Queue on the HBA
14608  * @phba: HBA structure that indicates port to create a queue on.
14609  * @eq: The queue structure to use to create the event queue.
14610  * @imax: The maximum interrupt per second limit.
14611  *
14612  * This function creates an event queue, as detailed in @eq, on a port,
14613  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14614  *
14615  * The @phba struct is used to send mailbox command to HBA. The @eq struct
14616  * is used to get the entry count and entry size that are necessary to
14617  * determine the number of pages to allocate and use for this queue. This
14618  * function will send the EQ_CREATE mailbox command to the HBA to setup the
14619  * event queue. This function is synchronous; it polls for the mailbox
14620  * command to finish before continuing.
14621  *
14622  * On success this function will return a zero. If unable to allocate enough
14623  * memory this function will return -ENOMEM. If the queue create mailbox command
14624  * fails this function will return -ENXIO.
14625  **/
14626 int
14627 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14628 {
14629         struct lpfc_mbx_eq_create *eq_create;
14630         LPFC_MBOXQ_t *mbox;
14631         int rc, length, status = 0;
14632         struct lpfc_dmabuf *dmabuf;
14633         uint32_t shdr_status, shdr_add_status;
14634         union lpfc_sli4_cfg_shdr *shdr;
14635         uint16_t dmult;
14636         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14637
14638         /* sanity check on queue memory */
14639         if (!eq)
14640                 return -ENODEV;
14641         if (!phba->sli4_hba.pc_sli4_params.supported)
14642                 hw_page_size = SLI4_PAGE_SIZE;
14643
14644         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14645         if (!mbox)
14646                 return -ENOMEM;
14647         length = (sizeof(struct lpfc_mbx_eq_create) -
14648                   sizeof(struct lpfc_sli4_cfg_mhdr));
14649         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14650                          LPFC_MBOX_OPCODE_EQ_CREATE,
14651                          length, LPFC_SLI4_MBX_EMBED);
14652         eq_create = &mbox->u.mqe.un.eq_create;
14653         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14654         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14655                eq->page_count);
14656         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14657                LPFC_EQE_SIZE);
14658         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14659
14660         /* Use version 2 of CREATE_EQ if eqav is set */
14661         if (phba->sli4_hba.pc_sli4_params.eqav) {
14662                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14663                        LPFC_Q_CREATE_VERSION_2);
14664                 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14665                        phba->sli4_hba.pc_sli4_params.eqav);
14666         }
14667
14668         /* don't setup delay multiplier using EQ_CREATE */
14669         dmult = 0;
14670         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14671                dmult);
14672         switch (eq->entry_count) {
14673         default:
14674                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14675                                 "0360 Unsupported EQ count. (%d)\n",
14676                                 eq->entry_count);
14677                 if (eq->entry_count < 256) {
14678                         status = -EINVAL;
14679                         goto out;
14680                 }
14681                 /* fall through - otherwise default to smallest count */
14682         case 256:
14683                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14684                        LPFC_EQ_CNT_256);
14685                 break;
14686         case 512:
14687                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14688                        LPFC_EQ_CNT_512);
14689                 break;
14690         case 1024:
14691                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14692                        LPFC_EQ_CNT_1024);
14693                 break;
14694         case 2048:
14695                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14696                        LPFC_EQ_CNT_2048);
14697                 break;
14698         case 4096:
14699                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14700                        LPFC_EQ_CNT_4096);
14701                 break;
14702         }
14703         list_for_each_entry(dmabuf, &eq->page_list, list) {
14704                 memset(dmabuf->virt, 0, hw_page_size);
14705                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14706                                         putPaddrLow(dmabuf->phys);
14707                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14708                                         putPaddrHigh(dmabuf->phys);
14709         }
14710         mbox->vport = phba->pport;
14711         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14712         mbox->ctx_buf = NULL;
14713         mbox->ctx_ndlp = NULL;
14714         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14715         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14716         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14717         if (shdr_status || shdr_add_status || rc) {
14718                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14719                                 "2500 EQ_CREATE mailbox failed with "
14720                                 "status x%x add_status x%x, mbx status x%x\n",
14721                                 shdr_status, shdr_add_status, rc);
14722                 status = -ENXIO;
14723         }
14724         eq->type = LPFC_EQ;
14725         eq->subtype = LPFC_NONE;
14726         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14727         if (eq->queue_id == 0xFFFF)
14728                 status = -ENXIO;
14729         eq->host_index = 0;
14730         eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
14731         eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
14732 out:
14733         mempool_free(mbox, phba->mbox_mem_pool);
14734         return status;
14735 }
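
/*
 * Editor's note: a sketch of the call contract documented above; the
 * caller and cleanup policy are illustrative. Entry counts other than
 * 256/512/1024/2048/4096 are rejected: smaller counts fail with -EINVAL,
 * larger unsupported counts fall back to the 256-entry encoding.
 */
#if 0
        rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
        switch (rc) {
        case 0:         /* eq->queue_id is valid; EQ ready to be armed */
                break;
        case -ENOMEM:   /* no mailbox memory was available */
        case -ENXIO:    /* firmware rejected EQ_CREATE; see log "2500" */
        default:
                lpfc_sli4_queue_free(eq);
        }
#endif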
14736
14737 /**
14738  * lpfc_cq_create - Create a Completion Queue on the HBA
14739  * @phba: HBA structure that indicates port to create a queue on.
14740  * @cq: The queue structure to use to create the completion queue.
14741  * @eq: The event queue to bind this completion queue to.
14742  *
14743  * This function creates a completion queue, as detailed in @cq, on a port,
14744  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14745  *
14746  * The @phba struct is used to send mailbox command to HBA. The @cq struct
14747  * is used to get the entry count and entry size that are necessary to
14748  * determine the number of pages to allocate and use for this queue. The @eq
14749  * is used to indicate which event queue to bind this completion queue to. This
14750  * function will send the CQ_CREATE mailbox command to the HBA to setup the
14751  * completion queue. This function is synchronous; it polls for the mailbox
14752  * command to finish before continuing.
14753  *
14754  * On success this function will return a zero. If unable to allocate enough
14755  * memory this function will return -ENOMEM. If the queue create mailbox command
14756  * fails this function will return -ENXIO.
14757  **/
14758 int
14759 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14760                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14761 {
14762         struct lpfc_mbx_cq_create *cq_create;
14763         struct lpfc_dmabuf *dmabuf;
14764         LPFC_MBOXQ_t *mbox;
14765         int rc, length, status = 0;
14766         uint32_t shdr_status, shdr_add_status;
14767         union lpfc_sli4_cfg_shdr *shdr;
14768
14769         /* sanity check on queue memory */
14770         if (!cq || !eq)
14771                 return -ENODEV;
14772
14773         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14774         if (!mbox)
14775                 return -ENOMEM;
14776         length = (sizeof(struct lpfc_mbx_cq_create) -
14777                   sizeof(struct lpfc_sli4_cfg_mhdr));
14778         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14779                          LPFC_MBOX_OPCODE_CQ_CREATE,
14780                          length, LPFC_SLI4_MBX_EMBED);
14781         cq_create = &mbox->u.mqe.un.cq_create;
14782         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14783         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14784                     cq->page_count);
14785         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14786         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14787         bf_set(lpfc_mbox_hdr_version, &shdr->request,
14788                phba->sli4_hba.pc_sli4_params.cqv);
14789         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14790                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14791                        (cq->page_size / SLI4_PAGE_SIZE));
14792                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14793                        eq->queue_id);
14794                 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14795                        phba->sli4_hba.pc_sli4_params.cqav);
14796         } else {
14797                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14798                        eq->queue_id);
14799         }
14800         switch (cq->entry_count) {
14801         case 2048:
14802         case 4096:
14803                 if (phba->sli4_hba.pc_sli4_params.cqv ==
14804                     LPFC_Q_CREATE_VERSION_2) {
14805                         cq_create->u.request.context.lpfc_cq_context_count =
14806                                 cq->entry_count;
14807                         bf_set(lpfc_cq_context_count,
14808                                &cq_create->u.request.context,
14809                                LPFC_CQ_CNT_WORD7);
14810                         break;
14811                 }
14812                 /* fall through */
14813         default:
14814                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14815                                 "0361 Unsupported CQ count: "
14816                                 "entry cnt %d sz %d pg cnt %d\n",
14817                                 cq->entry_count, cq->entry_size,
14818                                 cq->page_count);
14819                 if (cq->entry_count < 256) {
14820                         status = -EINVAL;
14821                         goto out;
14822                 }
14823                 /* fall through - otherwise default to smallest count */
14824         case 256:
14825                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14826                        LPFC_CQ_CNT_256);
14827                 break;
14828         case 512:
14829                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14830                        LPFC_CQ_CNT_512);
14831                 break;
14832         case 1024:
14833                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14834                        LPFC_CQ_CNT_1024);
14835                 break;
14836         }
14837         list_for_each_entry(dmabuf, &cq->page_list, list) {
14838                 memset(dmabuf->virt, 0, cq->page_size);
14839                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14840                                         putPaddrLow(dmabuf->phys);
14841                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14842                                         putPaddrHigh(dmabuf->phys);
14843         }
14844         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14845
14846         /* The IOCTL status is embedded in the mailbox subheader. */
14847         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14848         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14849         if (shdr_status || shdr_add_status || rc) {
14850                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14851                                 "2501 CQ_CREATE mailbox failed with "
14852                                 "status x%x add_status x%x, mbx status x%x\n",
14853                                 shdr_status, shdr_add_status, rc);
14854                 status = -ENXIO;
14855                 goto out;
14856         }
14857         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14858         if (cq->queue_id == 0xFFFF) {
14859                 status = -ENXIO;
14860                 goto out;
14861         }
14862         /* link the cq onto the parent eq child list */
14863         list_add_tail(&cq->list, &eq->child_list);
14864         /* Set up completion queue's type and subtype */
14865         cq->type = type;
14866         cq->subtype = subtype;
14868         cq->assoc_qid = eq->queue_id;
14869         cq->assoc_qp = eq;
14870         cq->host_index = 0;
14871         cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14872         cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14873
14874         if (cq->queue_id > phba->sli4_hba.cq_max)
14875                 phba->sli4_hba.cq_max = cq->queue_id;
14876 out:
14877         mempool_free(mbox, phba->mbox_mem_pool);
14878         return status;
14879 }
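
/*
 * Editorial usage sketch, not part of the driver: a CQ is bound to an
 * already-created EQ, so SLI4 setup code would typically do something
 * like the following, assuming "cq" and "eq" were allocated earlier
 * with lpfc_sli4_queue_alloc():
 *
 *	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_ELS);
 *	if (rc)
 *		return rc;
 *
 * On failure this propagates -ENODEV, -ENOMEM or -ENXIO as documented
 * above.
 */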
14880
14881 /**
14882  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14883  * @phba: HBA structure that indicates port to create a queue on.
14884  * @cqp: The queue structure array to use to create the completion queues.
14885  * @hdwq: The hardware queue array with the EQs to bind the completion queues to.
14886  *
14887  * This function creates a set of completion queues to support MRQ, as
14888  * detailed in @cqp, on a port described by @phba, by sending a
14889  * CREATE_CQ_SET mailbox command to the HBA.
14890  *
14891  * The @phba struct is used to send the mailbox command to the HBA. Each @cqp
14892  * entry is used to get the entry count and entry size that are necessary to
14893  * determine the number of pages to allocate and use for that queue. The EQ in
14894  * each @hdwq entry indicates which event queue to bind each completion queue
14895  * to. This function will send the CREATE_CQ_SET mailbox command to the HBA to
14896  * set up the completion queues. The mailbox command is issued in polled mode,
14897  * so this function waits for it to finish before returning.
14898  *
14899  * On success this function will return a zero. If unable to allocate enough
14900  * memory this function will return -ENOMEM. If the queue create mailbox command
14901  * fails this function will return -ENXIO.
14902  **/
14903 int
14904 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14905                    struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
14906                    uint32_t subtype)
14907 {
14908         struct lpfc_queue *cq;
14909         struct lpfc_queue *eq;
14910         struct lpfc_mbx_cq_create_set *cq_set;
14911         struct lpfc_dmabuf *dmabuf;
14912         LPFC_MBOXQ_t *mbox;
14913         int rc, length, alloclen, status = 0;
14914         int cnt, idx, numcq, page_idx = 0;
14915         uint32_t shdr_status, shdr_add_status;
14916         union lpfc_sli4_cfg_shdr *shdr;
14917         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14918
14919         /* sanity check on queue memory */
14920         numcq = phba->cfg_nvmet_mrq;
14921         if (!cqp || !hdwq || !numcq)
14922                 return -ENODEV;
14923
14924         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14925         if (!mbox)
14926                 return -ENOMEM;
14927
14928         length = sizeof(struct lpfc_mbx_cq_create_set);
14929         length += ((numcq * cqp[0]->page_count) *
14930                    sizeof(struct dma_address));
14931         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14932                         LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
14933                         LPFC_SLI4_MBX_NEMBED);
14934         if (alloclen < length) {
14935                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14936                                 "3098 Allocated DMA memory size (%d) is "
14937                                 "less than the requested DMA memory size "
14938                                 "(%d)\n", alloclen, length);
14939                 status = -ENOMEM;
14940                 goto out;
14941         }
14942         cq_set = mbox->sge_array->addr[0];
14943         shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
14944         bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
14945
14946         for (idx = 0; idx < numcq; idx++) {
14947                 cq = cqp[idx];
14948                 eq = hdwq[idx].hba_eq;
14949                 if (!cq || !eq) {
14950                         status = -ENOMEM;
14951                         goto out;
14952                 }
14953                 if (!phba->sli4_hba.pc_sli4_params.supported)
14954                         hw_page_size = cq->page_size;
14955
14956                 switch (idx) {
14957                 case 0:
14958                         bf_set(lpfc_mbx_cq_create_set_page_size,
14959                                &cq_set->u.request,
14960                                (hw_page_size / SLI4_PAGE_SIZE));
14961                         bf_set(lpfc_mbx_cq_create_set_num_pages,
14962                                &cq_set->u.request, cq->page_count);
14963                         bf_set(lpfc_mbx_cq_create_set_evt,
14964                                &cq_set->u.request, 1);
14965                         bf_set(lpfc_mbx_cq_create_set_valid,
14966                                &cq_set->u.request, 1);
14967                         bf_set(lpfc_mbx_cq_create_set_cqe_size,
14968                                &cq_set->u.request, 0);
14969                         bf_set(lpfc_mbx_cq_create_set_num_cq,
14970                                &cq_set->u.request, numcq);
14971                         bf_set(lpfc_mbx_cq_create_set_autovalid,
14972                                &cq_set->u.request,
14973                                phba->sli4_hba.pc_sli4_params.cqav);
14974                         switch (cq->entry_count) {
14975                         case 2048:
14976                         case 4096:
14977                                 if (phba->sli4_hba.pc_sli4_params.cqv ==
14978                                     LPFC_Q_CREATE_VERSION_2) {
14979                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14980                                                &cq_set->u.request,
14981                                                 cq->entry_count);
14982                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14983                                                &cq_set->u.request,
14984                                                LPFC_CQ_CNT_WORD7);
14985                                         break;
14986                                 }
14987                                 /* fall through */
14988                         default:
14989                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14990                                                 "3118 Bad CQ count. (%d)\n",
14991                                                 cq->entry_count);
14992                                 if (cq->entry_count < 256) {
14993                                         status = -EINVAL;
14994                                         goto out;
14995                                 }
14996                                 /* fall through - otherwise default to smallest */
14997                         case 256:
14998                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14999                                        &cq_set->u.request, LPFC_CQ_CNT_256);
15000                                 break;
15001                         case 512:
15002                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15003                                        &cq_set->u.request, LPFC_CQ_CNT_512);
15004                                 break;
15005                         case 1024:
15006                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15007                                        &cq_set->u.request, LPFC_CQ_CNT_1024);
15008                                 break;
15009                         }
15010                         bf_set(lpfc_mbx_cq_create_set_eq_id0,
15011                                &cq_set->u.request, eq->queue_id);
15012                         break;
15013                 case 1:
15014                         bf_set(lpfc_mbx_cq_create_set_eq_id1,
15015                                &cq_set->u.request, eq->queue_id);
15016                         break;
15017                 case 2:
15018                         bf_set(lpfc_mbx_cq_create_set_eq_id2,
15019                                &cq_set->u.request, eq->queue_id);
15020                         break;
15021                 case 3:
15022                         bf_set(lpfc_mbx_cq_create_set_eq_id3,
15023                                &cq_set->u.request, eq->queue_id);
15024                         break;
15025                 case 4:
15026                         bf_set(lpfc_mbx_cq_create_set_eq_id4,
15027                                &cq_set->u.request, eq->queue_id);
15028                         break;
15029                 case 5:
15030                         bf_set(lpfc_mbx_cq_create_set_eq_id5,
15031                                &cq_set->u.request, eq->queue_id);
15032                         break;
15033                 case 6:
15034                         bf_set(lpfc_mbx_cq_create_set_eq_id6,
15035                                &cq_set->u.request, eq->queue_id);
15036                         break;
15037                 case 7:
15038                         bf_set(lpfc_mbx_cq_create_set_eq_id7,
15039                                &cq_set->u.request, eq->queue_id);
15040                         break;
15041                 case 8:
15042                         bf_set(lpfc_mbx_cq_create_set_eq_id8,
15043                                &cq_set->u.request, eq->queue_id);
15044                         break;
15045                 case 9:
15046                         bf_set(lpfc_mbx_cq_create_set_eq_id9,
15047                                &cq_set->u.request, eq->queue_id);
15048                         break;
15049                 case 10:
15050                         bf_set(lpfc_mbx_cq_create_set_eq_id10,
15051                                &cq_set->u.request, eq->queue_id);
15052                         break;
15053                 case 11:
15054                         bf_set(lpfc_mbx_cq_create_set_eq_id11,
15055                                &cq_set->u.request, eq->queue_id);
15056                         break;
15057                 case 12:
15058                         bf_set(lpfc_mbx_cq_create_set_eq_id12,
15059                                &cq_set->u.request, eq->queue_id);
15060                         break;
15061                 case 13:
15062                         bf_set(lpfc_mbx_cq_create_set_eq_id13,
15063                                &cq_set->u.request, eq->queue_id);
15064                         break;
15065                 case 14:
15066                         bf_set(lpfc_mbx_cq_create_set_eq_id14,
15067                                &cq_set->u.request, eq->queue_id);
15068                         break;
15069                 case 15:
15070                         bf_set(lpfc_mbx_cq_create_set_eq_id15,
15071                                &cq_set->u.request, eq->queue_id);
15072                         break;
15073                 }
15074
15075                 /* link the cq onto the parent eq child list */
15076                 list_add_tail(&cq->list, &eq->child_list);
15077                 /* Set up completion queue's type and subtype */
15078                 cq->type = type;
15079                 cq->subtype = subtype;
15080                 cq->assoc_qid = eq->queue_id;
15081                 cq->assoc_qp = eq;
15082                 cq->host_index = 0;
15083                 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15084                 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15085                                          cq->entry_count);
15086                 cq->chann = idx;
15087
15088                 rc = 0;
15089                 list_for_each_entry(dmabuf, &cq->page_list, list) {
15090                         memset(dmabuf->virt, 0, hw_page_size);
15091                         cnt = page_idx + dmabuf->buffer_tag;
15092                         cq_set->u.request.page[cnt].addr_lo =
15093                                         putPaddrLow(dmabuf->phys);
15094                         cq_set->u.request.page[cnt].addr_hi =
15095                                         putPaddrHigh(dmabuf->phys);
15096                         rc++;
15097                 }
15098                 page_idx += rc;
15099         }
15100
15101         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15102
15103         /* The IOCTL status is embedded in the mailbox subheader. */
15104         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15105         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15106         if (shdr_status || shdr_add_status || rc) {
15107                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15108                                 "3119 CQ_CREATE_SET mailbox failed with "
15109                                 "status x%x add_status x%x, mbx status x%x\n",
15110                                 shdr_status, shdr_add_status, rc);
15111                 status = -ENXIO;
15112                 goto out;
15113         }
15114         rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15115         if (rc == 0xFFFF) {
15116                 status = -ENXIO;
15117                 goto out;
15118         }
15119
15120         for (idx = 0; idx < numcq; idx++) {
15121                 cq = cqp[idx];
15122                 cq->queue_id = rc + idx;
15123                 if (cq->queue_id > phba->sli4_hba.cq_max)
15124                         phba->sli4_hba.cq_max = cq->queue_id;
15125         }
15126
15127 out:
15128         lpfc_sli4_mbox_cmd_free(phba, mbox);
15129         return status;
15130 }
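
/*
 * Editorial usage sketch, not part of the driver: the CQ set is created
 * with a single CREATE_CQ_SET command, one CQ per hardware-queue EQ, for
 * the NVMET MRQ path; assuming the nvmet_cqset and hdwq arrays have
 * already been allocated, a caller would look roughly like:
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);
 *
 * On success the firmware returns a base queue ID and the queues in @cqp
 * are numbered consecutively from it, as the loop above shows.
 */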
15131
15132 /**
15133  * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
15134  * @phba: HBA structure that indicates port to create a queue on.
15135  * @mq: The queue structure to use to create the mailbox queue.
15136  * @mbox: An allocated pointer of type LPFC_MBOXQ_t.
15137  * @cq: The completion queue to associate with this mailbox queue.
15138  *
15139  * This function provides fallback (fb) functionality when the
15140  * mq_create_ext fails on older FW generations. Its purpose is otherwise
15141  * identical to mq_create_ext.
15142  *
15143  * This routine cannot fail as all attributes were previously accessed and
15144  * initialized in mq_create_ext.
15145  **/
15146 static void
15147 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15148                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15149 {
15150         struct lpfc_mbx_mq_create *mq_create;
15151         struct lpfc_dmabuf *dmabuf;
15152         int length;
15153
15154         length = (sizeof(struct lpfc_mbx_mq_create) -
15155                   sizeof(struct lpfc_sli4_cfg_mhdr));
15156         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15157                          LPFC_MBOX_OPCODE_MQ_CREATE,
15158                          length, LPFC_SLI4_MBX_EMBED);
15159         mq_create = &mbox->u.mqe.un.mq_create;
15160         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15161                mq->page_count);
15162         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15163                cq->queue_id);
15164         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15165         switch (mq->entry_count) {
15166         case 16:
15167                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15168                        LPFC_MQ_RING_SIZE_16);
15169                 break;
15170         case 32:
15171                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15172                        LPFC_MQ_RING_SIZE_32);
15173                 break;
15174         case 64:
15175                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15176                        LPFC_MQ_RING_SIZE_64);
15177                 break;
15178         case 128:
15179                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15180                        LPFC_MQ_RING_SIZE_128);
15181                 break;
15182         }
15183         list_for_each_entry(dmabuf, &mq->page_list, list) {
15184                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15185                         putPaddrLow(dmabuf->phys);
15186                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15187                         putPaddrHigh(dmabuf->phys);
15188         }
15189 }
15190
15191 /**
15192  * lpfc_mq_create - Create a mailbox Queue on the HBA
15193  * @phba: HBA structure that indicates port to create a queue on.
15194  * @mq: The queue structure to use to create the mailbox queue.
15195  * @cq: The completion queue to associate with this mailbox queue.
15196  * @subtype: The queue's subtype.
15197  *
15198  * This function creates a mailbox queue, as detailed in @mq, on a port,
15199  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15200  *
15201  * The @phba struct is used to send the mailbox command to the HBA. The @mq
15202  * struct is used to get the entry count and entry size that are necessary to
15203  * determine the number of pages to allocate and use for this queue. This
15204  * function will send the MQ_CREATE_EXT mailbox command to the HBA to set up
15205  * the mailbox queue. The mailbox command is issued in polled mode, so this
15206  * function waits for it to finish before returning.
15207  *
15208  * On success this function will return a zero. If unable to allocate enough
15209  * memory this function will return -ENOMEM. If the queue create mailbox command
15210  * fails this function will return -ENXIO.
15211  **/
15212 int32_t
15213 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15214                struct lpfc_queue *cq, uint32_t subtype)
15215 {
15216         struct lpfc_mbx_mq_create *mq_create;
15217         struct lpfc_mbx_mq_create_ext *mq_create_ext;
15218         struct lpfc_dmabuf *dmabuf;
15219         LPFC_MBOXQ_t *mbox;
15220         int rc, length, status = 0;
15221         uint32_t shdr_status, shdr_add_status;
15222         union lpfc_sli4_cfg_shdr *shdr;
15223         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15224
15225         /* sanity check on queue memory */
15226         if (!mq || !cq)
15227                 return -ENODEV;
15228         if (!phba->sli4_hba.pc_sli4_params.supported)
15229                 hw_page_size = SLI4_PAGE_SIZE;
15230
15231         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15232         if (!mbox)
15233                 return -ENOMEM;
15234         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15235                   sizeof(struct lpfc_sli4_cfg_mhdr));
15236         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15237                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15238                          length, LPFC_SLI4_MBX_EMBED);
15239
15240         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15241         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15242         bf_set(lpfc_mbx_mq_create_ext_num_pages,
15243                &mq_create_ext->u.request, mq->page_count);
15244         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15245                &mq_create_ext->u.request, 1);
15246         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15247                &mq_create_ext->u.request, 1);
15248         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15249                &mq_create_ext->u.request, 1);
15250         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15251                &mq_create_ext->u.request, 1);
15252         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15253                &mq_create_ext->u.request, 1);
15254         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15255         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15256                phba->sli4_hba.pc_sli4_params.mqv);
15257         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15258                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15259                        cq->queue_id);
15260         else
15261                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15262                        cq->queue_id);
15263         switch (mq->entry_count) {
15264         default:
15265                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15266                                 "0362 Unsupported MQ count. (%d)\n",
15267                                 mq->entry_count);
15268                 if (mq->entry_count < 16) {
15269                         status = -EINVAL;
15270                         goto out;
15271                 }
15272                 /* fall through - otherwise default to smallest count */
15273         case 16:
15274                 bf_set(lpfc_mq_context_ring_size,
15275                        &mq_create_ext->u.request.context,
15276                        LPFC_MQ_RING_SIZE_16);
15277                 break;
15278         case 32:
15279                 bf_set(lpfc_mq_context_ring_size,
15280                        &mq_create_ext->u.request.context,
15281                        LPFC_MQ_RING_SIZE_32);
15282                 break;
15283         case 64:
15284                 bf_set(lpfc_mq_context_ring_size,
15285                        &mq_create_ext->u.request.context,
15286                        LPFC_MQ_RING_SIZE_64);
15287                 break;
15288         case 128:
15289                 bf_set(lpfc_mq_context_ring_size,
15290                        &mq_create_ext->u.request.context,
15291                        LPFC_MQ_RING_SIZE_128);
15292                 break;
15293         }
15294         list_for_each_entry(dmabuf, &mq->page_list, list) {
15295                 memset(dmabuf->virt, 0, hw_page_size);
15296                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15297                                         putPaddrLow(dmabuf->phys);
15298                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15299                                         putPaddrHigh(dmabuf->phys);
15300         }
15301         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15302         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15303                               &mq_create_ext->u.response);
15304         if (rc != MBX_SUCCESS) {
15305                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15306                                 "2795 MQ_CREATE_EXT failed with "
15307                                 "status x%x. Failback to MQ_CREATE.\n",
15308                                 rc);
15309                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15310                 mq_create = &mbox->u.mqe.un.mq_create;
15311                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15312                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15313                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15314                                       &mq_create->u.response);
15315         }
15316
15317         /* The IOCTL status is embedded in the mailbox subheader. */
15318         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15319         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15320         if (shdr_status || shdr_add_status || rc) {
15321                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15322                                 "2502 MQ_CREATE mailbox failed with "
15323                                 "status x%x add_status x%x, mbx status x%x\n",
15324                                 shdr_status, shdr_add_status, rc);
15325                 status = -ENXIO;
15326                 goto out;
15327         }
15328         if (mq->queue_id == 0xFFFF) {
15329                 status = -ENXIO;
15330                 goto out;
15331         }
15332         mq->type = LPFC_MQ;
15333         mq->assoc_qid = cq->queue_id;
15334         mq->subtype = subtype;
15335         mq->host_index = 0;
15336         mq->hba_index = 0;
15337
15338         /* link the mq onto the parent cq child list */
15339         list_add_tail(&mq->list, &cq->child_list);
15340 out:
15341         mempool_free(mbox, phba->mbox_mem_pool);
15342         return status;
15343 }
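
/*
 * Editorial usage sketch, not part of the driver: a single mailbox queue
 * is bound to the mailbox CQ during SLI4 setup, e.g. (assuming mbx_wq and
 * mbx_cq were allocated beforehand):
 *
 *	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
 *			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
 *
 * If older firmware rejects MQ_CREATE_EXT, the function transparently
 * retries with the plain MQ_CREATE built by lpfc_mq_create_fb_init().
 */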
15344
15345 /**
15346  * lpfc_wq_create - Create a Work Queue on the HBA
15347  * @phba: HBA structure that indicates port to create a queue on.
15348  * @wq: The queue structure to use to create the work queue.
15349  * @cq: The completion queue to bind this work queue to.
15350  * @subtype: The subtype of the work queue indicating its functionality.
15351  *
15352  * This function creates a work queue, as detailed in @wq, on a port, described
15353  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15354  *
15355  * The @phba struct is used to send the mailbox command to the HBA. The @wq struct
15356  * is used to get the entry count and entry size that are necessary to
15357  * determine the number of pages to allocate and use for this queue. The @cq
15358  * is used to indicate which completion queue to bind this work queue to. This
15359  * function will send the WQ_CREATE mailbox command to the HBA to set up the
15360  * work queue. The mailbox command is issued in polled mode, so this function
15361  * waits for it to finish before returning.
15362  *
15363  * On success this function will return a zero. If unable to allocate enough
15364  * memory this function will return -ENOMEM. If the queue create mailbox command
15365  * fails this function will return -ENXIO.
15366  **/
15367 int
15368 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15369                struct lpfc_queue *cq, uint32_t subtype)
15370 {
15371         struct lpfc_mbx_wq_create *wq_create;
15372         struct lpfc_dmabuf *dmabuf;
15373         LPFC_MBOXQ_t *mbox;
15374         int rc, length, status = 0;
15375         uint32_t shdr_status, shdr_add_status;
15376         union lpfc_sli4_cfg_shdr *shdr;
15377         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15378         struct dma_address *page;
15379         void __iomem *bar_memmap_p;
15380         uint32_t db_offset;
15381         uint16_t pci_barset;
15382         uint8_t dpp_barset;
15383         uint32_t dpp_offset;
15384         unsigned long pg_addr;
15385         uint8_t wq_create_version;
15386
15387         /* sanity check on queue memory */
15388         if (!wq || !cq)
15389                 return -ENODEV;
15390         if (!phba->sli4_hba.pc_sli4_params.supported)
15391                 hw_page_size = wq->page_size;
15392
15393         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15394         if (!mbox)
15395                 return -ENOMEM;
15396         length = (sizeof(struct lpfc_mbx_wq_create) -
15397                   sizeof(struct lpfc_sli4_cfg_mhdr));
15398         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15399                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15400                          length, LPFC_SLI4_MBX_EMBED);
15401         wq_create = &mbox->u.mqe.un.wq_create;
15402         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15403         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15404                     wq->page_count);
15405         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15406                     cq->queue_id);
15407
15408         /* wqv is the earliest version supported, NOT the latest */
15409         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15410                phba->sli4_hba.pc_sli4_params.wqv);
15411
15412         if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15413             (wq->page_size > SLI4_PAGE_SIZE))
15414                 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15415         else
15416                 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15417
15424         switch (wq_create_version) {
15425         case LPFC_Q_CREATE_VERSION_1:
15426                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15427                        wq->entry_count);
15428                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15429                        LPFC_Q_CREATE_VERSION_1);
15430
15431                 switch (wq->entry_size) {
15432                 default:
15433                 case 64:
15434                         bf_set(lpfc_mbx_wq_create_wqe_size,
15435                                &wq_create->u.request_1,
15436                                LPFC_WQ_WQE_SIZE_64);
15437                         break;
15438                 case 128:
15439                         bf_set(lpfc_mbx_wq_create_wqe_size,
15440                                &wq_create->u.request_1,
15441                                LPFC_WQ_WQE_SIZE_128);
15442                         break;
15443                 }
15444                 /* Request DPP by default */
15445                 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15446                 bf_set(lpfc_mbx_wq_create_page_size,
15447                        &wq_create->u.request_1,
15448                        (wq->page_size / SLI4_PAGE_SIZE));
15449                 page = wq_create->u.request_1.page;
15450                 break;
15451         default:
15452                 page = wq_create->u.request.page;
15453                 break;
15454         }
15455
15456         list_for_each_entry(dmabuf, &wq->page_list, list) {
15457                 memset(dmabuf->virt, 0, hw_page_size);
15458                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15459                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15460         }
15461
15462         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15463                 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15464
15465         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15466         /* The IOCTL status is embedded in the mailbox subheader. */
15467         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15468         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15469         if (shdr_status || shdr_add_status || rc) {
15470                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15471                                 "2503 WQ_CREATE mailbox failed with "
15472                                 "status x%x add_status x%x, mbx status x%x\n",
15473                                 shdr_status, shdr_add_status, rc);
15474                 status = -ENXIO;
15475                 goto out;
15476         }
15477
15478         if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15479                 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15480                                         &wq_create->u.response);
15481         else
15482                 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15483                                         &wq_create->u.response_1);
15484
15485         if (wq->queue_id == 0xFFFF) {
15486                 status = -ENXIO;
15487                 goto out;
15488         }
15489
15490         wq->db_format = LPFC_DB_LIST_FORMAT;
15491         if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15492                 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15493                         wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15494                                                &wq_create->u.response);
15495                         if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15496                             (wq->db_format != LPFC_DB_RING_FORMAT)) {
15497                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15498                                                 "3265 WQ[%d] doorbell format "
15499                                                 "not supported: x%x\n",
15500                                                 wq->queue_id, wq->db_format);
15501                                 status = -EINVAL;
15502                                 goto out;
15503                         }
15504                         pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15505                                             &wq_create->u.response);
15506                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15507                                                                    pci_barset);
15508                         if (!bar_memmap_p) {
15509                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15510                                                 "3263 WQ[%d] failed to memmap "
15511                                                 "pci barset:x%x\n",
15512                                                 wq->queue_id, pci_barset);
15513                                 status = -ENOMEM;
15514                                 goto out;
15515                         }
15516                         db_offset = wq_create->u.response.doorbell_offset;
15517                         if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15518                             (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15519                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15520                                                 "3252 WQ[%d] doorbell offset "
15521                                                 "not supported: x%x\n",
15522                                                 wq->queue_id, db_offset);
15523                                 status = -EINVAL;
15524                                 goto out;
15525                         }
15526                         wq->db_regaddr = bar_memmap_p + db_offset;
15527                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15528                                         "3264 WQ[%d]: barset:x%x, offset:x%x, "
15529                                         "format:x%x\n", wq->queue_id,
15530                                         pci_barset, db_offset, wq->db_format);
15531                 } else
15532                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15533         } else {
15534                 /* Check if DPP was honored by the firmware */
15535                 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15536                                     &wq_create->u.response_1);
15537                 if (wq->dpp_enable) {
15538                         pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15539                                             &wq_create->u.response_1);
15540                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15541                                                                    pci_barset);
15542                         if (!bar_memmap_p) {
15543                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15544                                                 "3267 WQ[%d] failed to memmap "
15545                                                 "pci barset:x%x\n",
15546                                                 wq->queue_id, pci_barset);
15547                                 status = -ENOMEM;
15548                                 goto out;
15549                         }
15550                         db_offset = wq_create->u.response_1.doorbell_offset;
15551                         wq->db_regaddr = bar_memmap_p + db_offset;
15552                         wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15553                                             &wq_create->u.response_1);
15554                         dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15555                                             &wq_create->u.response_1);
15556                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15557                                                                    dpp_barset);
15558                         if (!bar_memmap_p) {
15559                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15560                                                 "3268 WQ[%d] failed to memmap "
15561                                                 "pci barset:x%x\n",
15562                                                 wq->queue_id, dpp_barset);
15563                                 status = -ENOMEM;
15564                                 goto out;
15565                         }
15566                         dpp_offset = wq_create->u.response_1.dpp_offset;
15567                         wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15568                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15569                                         "3271 WQ[%d]: barset:x%x, offset:x%x, "
15570                                         "dpp_id:x%x dpp_barset:x%x "
15571                                         "dpp_offset:x%x\n",
15572                                         wq->queue_id, pci_barset, db_offset,
15573                                         wq->dpp_id, dpp_barset, dpp_offset);
15574
15575                         /* Enable combined writes for DPP aperture */
15576                         pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15577 #ifdef CONFIG_X86
15578                         rc = set_memory_wc(pg_addr, 1);
15579                         if (rc) {
15580                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15581                                         "3272 Cannot setup Combined "
15582                                         "Write on WQ[%d] - disable DPP\n",
15583                                         wq->queue_id);
15584                                 phba->cfg_enable_dpp = 0;
15585                         }
15586 #else
15587                         phba->cfg_enable_dpp = 0;
15588 #endif
15589                 } else
15590                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15591         }
15592         wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15593         if (wq->pring == NULL) {
15594                 status = -ENOMEM;
15595                 goto out;
15596         }
15597         wq->type = LPFC_WQ;
15598         wq->assoc_qid = cq->queue_id;
15599         wq->subtype = subtype;
15600         wq->host_index = 0;
15601         wq->hba_index = 0;
15602         wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15603
15604         /* link the wq onto the parent cq child list */
15605         list_add_tail(&wq->list, &cq->child_list);
15606 out:
15607         mempool_free(mbox, phba->mbox_mem_pool);
15608         return status;
15609 }
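
/*
 * Editorial usage sketch, not part of the driver: each work queue is
 * bound to its completion queue, e.g. for the ELS path (assuming els_wq
 * and els_cq were allocated beforehand):
 *
 *	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
 *			    phba->sli4_hba.els_cq, LPFC_ELS);
 *
 * On return, wq->db_regaddr (and wq->dpp_regaddr when DPP was granted)
 * is set up for posting WQEs via lpfc_sli4_wq_put().
 */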
15610
15611 /**
15612  * lpfc_rq_create - Create a Receive Queue on the HBA
15613  * @phba: HBA structure that indicates port to create a queue on.
15614  * @hrq: The queue structure to use to create the header receive queue.
15615  * @drq: The queue structure to use to create the data receive queue.
15616  * @cq: The completion queue to bind these receive queues to.
15617  *
15618  * This function creates a receive buffer queue pair, as detailed in @hrq and
15619  * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
15620  * to the HBA.
15621  *
15622  * The @phba struct is used to send the mailbox command to the HBA. The @hrq
15623  * and @drq structs are used to get the entry count that is necessary to
15624  * determine the number of pages to use for each queue. The @cq is used to
15625  * indicate which completion queue to bind received buffers that are posted
15626  * to these queues to. This function will send the RQ_CREATE mailbox command
15627  * to the HBA to set up the receive queue pair. The mailbox command is issued
15628  * in polled mode, so this function waits for it to finish before returning.
15629  *
15630  * On success this function will return a zero. If unable to allocate enough
15631  * memory this function will return -ENOMEM. If the queue create mailbox command
15632  * fails this function will return -ENXIO.
15633  **/
15634 int
15635 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15636                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15637 {
15638         struct lpfc_mbx_rq_create *rq_create;
15639         struct lpfc_dmabuf *dmabuf;
15640         LPFC_MBOXQ_t *mbox;
15641         int rc, length, status = 0;
15642         uint32_t shdr_status, shdr_add_status;
15643         union lpfc_sli4_cfg_shdr *shdr;
15644         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15645         void __iomem *bar_memmap_p;
15646         uint32_t db_offset;
15647         uint16_t pci_barset;
15648
15649         /* sanity check on queue memory */
15650         if (!hrq || !drq || !cq)
15651                 return -ENODEV;
15652         if (!phba->sli4_hba.pc_sli4_params.supported)
15653                 hw_page_size = SLI4_PAGE_SIZE;
15654
15655         if (hrq->entry_count != drq->entry_count)
15656                 return -EINVAL;
15657         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15658         if (!mbox)
15659                 return -ENOMEM;
15660         length = (sizeof(struct lpfc_mbx_rq_create) -
15661                   sizeof(struct lpfc_sli4_cfg_mhdr));
15662         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15663                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15664                          length, LPFC_SLI4_MBX_EMBED);
15665         rq_create = &mbox->u.mqe.un.rq_create;
15666         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15667         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15668                phba->sli4_hba.pc_sli4_params.rqv);
15669         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15670                 bf_set(lpfc_rq_context_rqe_count_1,
15671                        &rq_create->u.request.context,
15672                        hrq->entry_count);
15673                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15674                 bf_set(lpfc_rq_context_rqe_size,
15675                        &rq_create->u.request.context,
15676                        LPFC_RQE_SIZE_8);
15677                 bf_set(lpfc_rq_context_page_size,
15678                        &rq_create->u.request.context,
15679                        LPFC_RQ_PAGE_SIZE_4096);
15680         } else {
15681                 switch (hrq->entry_count) {
15682                 default:
15683                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15684                                         "2535 Unsupported RQ count. (%d)\n",
15685                                         hrq->entry_count);
15686                         if (hrq->entry_count < 512) {
15687                                 status = -EINVAL;
15688                                 goto out;
15689                         }
15690                         /* fall through - otherwise default to smallest count */
15691                 case 512:
15692                         bf_set(lpfc_rq_context_rqe_count,
15693                                &rq_create->u.request.context,
15694                                LPFC_RQ_RING_SIZE_512);
15695                         break;
15696                 case 1024:
15697                         bf_set(lpfc_rq_context_rqe_count,
15698                                &rq_create->u.request.context,
15699                                LPFC_RQ_RING_SIZE_1024);
15700                         break;
15701                 case 2048:
15702                         bf_set(lpfc_rq_context_rqe_count,
15703                                &rq_create->u.request.context,
15704                                LPFC_RQ_RING_SIZE_2048);
15705                         break;
15706                 case 4096:
15707                         bf_set(lpfc_rq_context_rqe_count,
15708                                &rq_create->u.request.context,
15709                                LPFC_RQ_RING_SIZE_4096);
15710                         break;
15711                 }
15712                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15713                        LPFC_HDR_BUF_SIZE);
15714         }
15715         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15716                cq->queue_id);
15717         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15718                hrq->page_count);
15719         list_for_each_entry(dmabuf, &hrq->page_list, list) {
15720                 memset(dmabuf->virt, 0, hw_page_size);
15721                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15722                                         putPaddrLow(dmabuf->phys);
15723                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15724                                         putPaddrHigh(dmabuf->phys);
15725         }
15726         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15727                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15728
15729         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15730         /* The IOCTL status is embedded in the mailbox subheader. */
15731         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15732         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15733         if (shdr_status || shdr_add_status || rc) {
15734                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15735                                 "2504 RQ_CREATE mailbox failed with "
15736                                 "status x%x add_status x%x, mbx status x%x\n",
15737                                 shdr_status, shdr_add_status, rc);
15738                 status = -ENXIO;
15739                 goto out;
15740         }
15741         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15742         if (hrq->queue_id == 0xFFFF) {
15743                 status = -ENXIO;
15744                 goto out;
15745         }
15746
15747         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15748                 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15749                                         &rq_create->u.response);
15750                 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15751                     (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15752                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15753                                         "3262 RQ [%d] doorbell format not "
15754                                         "supported: x%x\n", hrq->queue_id,
15755                                         hrq->db_format);
15756                         status = -EINVAL;
15757                         goto out;
15758                 }
15759
15760                 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15761                                     &rq_create->u.response);
15762                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15763                 if (!bar_memmap_p) {
15764                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15765                                         "3269 RQ[%d] failed to memmap pci "
15766                                         "barset:x%x\n", hrq->queue_id,
15767                                         pci_barset);
15768                         status = -ENOMEM;
15769                         goto out;
15770                 }
15771
15772                 db_offset = rq_create->u.response.doorbell_offset;
15773                 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15774                     (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15775                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15776                                         "3270 RQ[%d] doorbell offset not "
15777                                         "supported: x%x\n", hrq->queue_id,
15778                                         db_offset);
15779                         status = -EINVAL;
15780                         goto out;
15781                 }
15782                 hrq->db_regaddr = bar_memmap_p + db_offset;
15783                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15784                                 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15785                                 "format:x%x\n", hrq->queue_id, pci_barset,
15786                                 db_offset, hrq->db_format);
15787         } else {
15788                 hrq->db_format = LPFC_DB_RING_FORMAT;
15789                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15790         }
15791         hrq->type = LPFC_HRQ;
15792         hrq->assoc_qid = cq->queue_id;
15793         hrq->subtype = subtype;
15794         hrq->host_index = 0;
15795         hrq->hba_index = 0;
15796         hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15797
15798         /* now create the data queue */
15799         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15800                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15801                          length, LPFC_SLI4_MBX_EMBED);
15802         bf_set(lpfc_mbox_hdr_version, &shdr->request,
15803                phba->sli4_hba.pc_sli4_params.rqv);
15804         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15805                 bf_set(lpfc_rq_context_rqe_count_1,
15806                        &rq_create->u.request.context, hrq->entry_count);
15807                 if (subtype == LPFC_NVMET)
15808                         rq_create->u.request.context.buffer_size =
15809                                 LPFC_NVMET_DATA_BUF_SIZE;
15810                 else
15811                         rq_create->u.request.context.buffer_size =
15812                                 LPFC_DATA_BUF_SIZE;
15813                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15814                        LPFC_RQE_SIZE_8);
15815                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15816                        (PAGE_SIZE/SLI4_PAGE_SIZE));
15817         } else {
15818                 switch (drq->entry_count) {
15819                 default:
15820                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15821                                         "2536 Unsupported RQ count. (%d)\n",
15822                                         drq->entry_count);
15823                         if (drq->entry_count < 512) {
15824                                 status = -EINVAL;
15825                                 goto out;
15826                         }
15827                         /* fall through - otherwise default to smallest count */
15828                 case 512:
15829                         bf_set(lpfc_rq_context_rqe_count,
15830                                &rq_create->u.request.context,
15831                                LPFC_RQ_RING_SIZE_512);
15832                         break;
15833                 case 1024:
15834                         bf_set(lpfc_rq_context_rqe_count,
15835                                &rq_create->u.request.context,
15836                                LPFC_RQ_RING_SIZE_1024);
15837                         break;
15838                 case 2048:
15839                         bf_set(lpfc_rq_context_rqe_count,
15840                                &rq_create->u.request.context,
15841                                LPFC_RQ_RING_SIZE_2048);
15842                         break;
15843                 case 4096:
15844                         bf_set(lpfc_rq_context_rqe_count,
15845                                &rq_create->u.request.context,
15846                                LPFC_RQ_RING_SIZE_4096);
15847                         break;
15848                 }
15849                 if (subtype == LPFC_NVMET)
15850                         bf_set(lpfc_rq_context_buf_size,
15851                                &rq_create->u.request.context,
15852                                LPFC_NVMET_DATA_BUF_SIZE);
15853                 else
15854                         bf_set(lpfc_rq_context_buf_size,
15855                                &rq_create->u.request.context,
15856                                LPFC_DATA_BUF_SIZE);
15857         }
15858         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15859                cq->queue_id);
15860         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15861                drq->page_count);
15862         list_for_each_entry(dmabuf, &drq->page_list, list) {
15863                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15864                                         putPaddrLow(dmabuf->phys);
15865                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15866                                         putPaddrHigh(dmabuf->phys);
15867         }
15868         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15869                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15870         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15871         /* The IOCTL status is embedded in the mailbox subheader. */
15872         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15873         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15874         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15875         if (shdr_status || shdr_add_status || rc) {
15876                 status = -ENXIO;
15877                 goto out;
15878         }
15879         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15880         if (drq->queue_id == 0xFFFF) {
15881                 status = -ENXIO;
15882                 goto out;
15883         }
15884         drq->type = LPFC_DRQ;
15885         drq->assoc_qid = cq->queue_id;
15886         drq->subtype = subtype;
15887         drq->host_index = 0;
15888         drq->hba_index = 0;
15889         drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15890
15891         /* link the header and data RQs onto the parent cq child list */
15892         list_add_tail(&hrq->list, &cq->child_list);
15893         list_add_tail(&drq->list, &cq->child_list);
15894
15895 out:
15896         mempool_free(mbox, phba->mbox_mem_pool);
15897         return status;
15898 }
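
/*
 * Editorial usage sketch, not part of the driver: the header/data RQ pair
 * shares one CQ and must have matching entry counts, e.g. for unsolicited
 * ELS receive traffic (assuming hdr_rq and dat_rq were allocated
 * beforehand):
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 *
 * Receive buffers are then posted to the pair with lpfc_sli4_rq_put().
 */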
15899
15900 /**
15901  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15902  * @phba: HBA structure that indicates port to create a queue on.
15903  * @hrqp: The queue structure array to use to create the header receive queues.
15904  * @drqp: The queue structure array to use to create the data receive queues.
15905  * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: Functional purpose of the queues (e.g. LPFC_NVMET).
15906  *
15907  * This function creates the receive buffer queue pairs detailed in @hrqp and
15908  * @drqp on the port described by @phba, by sending a RQ_CREATE mailbox command
15909  * to the HBA.
15910  *
15911  * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
15912  * and @drqp structs supply the entry counts used to determine the number of
15913  * pages needed for each queue. The @cqp array indicates which completion
15914  * queues the buffers posted to these receive queues are bound to. This
15915  * function sends the RQ_CREATE mailbox command to the HBA to set up the
15916  * receive queue pairs. The call is synchronous: it polls for the mailbox
15917  * command to finish before returning.
15918  *
15919  * On success this function will return a zero. If unable to allocate enough
15920  * memory this function will return -ENOMEM. If the queue create mailbox command
15921  * fails this function will return -ENXIO.
15922  **/
15923 int
15924 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15925                 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15926                 uint32_t subtype)
15927 {
15928         struct lpfc_queue *hrq, *drq, *cq;
15929         struct lpfc_mbx_rq_create_v2 *rq_create;
15930         struct lpfc_dmabuf *dmabuf;
15931         LPFC_MBOXQ_t *mbox;
15932         int rc, length, alloclen, status = 0;
15933         int cnt, idx, numrq, page_idx = 0;
15934         uint32_t shdr_status, shdr_add_status;
15935         union lpfc_sli4_cfg_shdr *shdr;
15936         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15937
15938         numrq = phba->cfg_nvmet_mrq;
15939         /* sanity check on array memory */
15940         if (!hrqp || !drqp || !cqp || !numrq)
15941                 return -ENODEV;
15942         if (!phba->sli4_hba.pc_sli4_params.supported)
15943                 hw_page_size = SLI4_PAGE_SIZE;
15944
15945         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15946         if (!mbox)
15947                 return -ENOMEM;
15948
15949         length = sizeof(struct lpfc_mbx_rq_create_v2);
15950         length += ((2 * numrq * hrqp[0]->page_count) *
15951                    sizeof(struct dma_address));
15952
15953         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15954                                     LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15955                                     LPFC_SLI4_MBX_NEMBED);
15956         if (alloclen < length) {
15957                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15958                                 "3099 Allocated DMA memory size (%d) is "
15959                                 "less than the requested DMA memory size "
15960                                 "(%d)\n", alloclen, length);
15961                 status = -ENOMEM;
15962                 goto out;
15963         }
15964
15967         rq_create = mbox->sge_array->addr[0];
15968         shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15969
15970         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15971         cnt = 0;
15972
15973         for (idx = 0; idx < numrq; idx++) {
15974                 hrq = hrqp[idx];
15975                 drq = drqp[idx];
15976                 cq  = cqp[idx];
15977
15978                 /* sanity check on queue memory */
15979                 if (!hrq || !drq || !cq) {
15980                         status = -ENODEV;
15981                         goto out;
15982                 }
15983
15984                 if (hrq->entry_count != drq->entry_count) {
15985                         status = -EINVAL;
15986                         goto out;
15987                 }
15988
15989                 if (idx == 0) {
15990                         bf_set(lpfc_mbx_rq_create_num_pages,
15991                                &rq_create->u.request,
15992                                hrq->page_count);
15993                         bf_set(lpfc_mbx_rq_create_rq_cnt,
15994                                &rq_create->u.request, (numrq * 2));
15995                         bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15996                                1);
15997                         bf_set(lpfc_rq_context_base_cq,
15998                                &rq_create->u.request.context,
15999                                cq->queue_id);
16000                         bf_set(lpfc_rq_context_data_size,
16001                                &rq_create->u.request.context,
16002                                LPFC_NVMET_DATA_BUF_SIZE);
16003                         bf_set(lpfc_rq_context_hdr_size,
16004                                &rq_create->u.request.context,
16005                                LPFC_HDR_BUF_SIZE);
16006                         bf_set(lpfc_rq_context_rqe_count_1,
16007                                &rq_create->u.request.context,
16008                                hrq->entry_count);
16009                         bf_set(lpfc_rq_context_rqe_size,
16010                                &rq_create->u.request.context,
16011                                LPFC_RQE_SIZE_8);
16012                         bf_set(lpfc_rq_context_page_size,
16013                                &rq_create->u.request.context,
16014                                (PAGE_SIZE/SLI4_PAGE_SIZE));
16015                 }
16016                 rc = 0;
16017                 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16018                         memset(dmabuf->virt, 0, hw_page_size);
16019                         cnt = page_idx + dmabuf->buffer_tag;
16020                         rq_create->u.request.page[cnt].addr_lo =
16021                                         putPaddrLow(dmabuf->phys);
16022                         rq_create->u.request.page[cnt].addr_hi =
16023                                         putPaddrHigh(dmabuf->phys);
16024                         rc++;
16025                 }
16026                 page_idx += rc;
16027
16028                 rc = 0;
16029                 list_for_each_entry(dmabuf, &drq->page_list, list) {
16030                         memset(dmabuf->virt, 0, hw_page_size);
16031                         cnt = page_idx + dmabuf->buffer_tag;
16032                         rq_create->u.request.page[cnt].addr_lo =
16033                                         putPaddrLow(dmabuf->phys);
16034                         rq_create->u.request.page[cnt].addr_hi =
16035                                         putPaddrHigh(dmabuf->phys);
16036                         rc++;
16037                 }
16038                 page_idx += rc;
16039
16040                 hrq->db_format = LPFC_DB_RING_FORMAT;
16041                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16042                 hrq->type = LPFC_HRQ;
16043                 hrq->assoc_qid = cq->queue_id;
16044                 hrq->subtype = subtype;
16045                 hrq->host_index = 0;
16046                 hrq->hba_index = 0;
16047                 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16048
16049                 drq->db_format = LPFC_DB_RING_FORMAT;
16050                 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16051                 drq->type = LPFC_DRQ;
16052                 drq->assoc_qid = cq->queue_id;
16053                 drq->subtype = subtype;
16054                 drq->host_index = 0;
16055                 drq->hba_index = 0;
16056                 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16057
16058                 list_add_tail(&hrq->list, &cq->child_list);
16059                 list_add_tail(&drq->list, &cq->child_list);
16060         }
16061
16062         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16063         /* The IOCTL status is embedded in the mailbox subheader. */
16064         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16065         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16066         if (shdr_status || shdr_add_status || rc) {
16067                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16068                                 "3120 RQ_CREATE mailbox failed with "
16069                                 "status x%x add_status x%x, mbx status x%x\n",
16070                                 shdr_status, shdr_add_status, rc);
16071                 status = -ENXIO;
16072                 goto out;
16073         }
16074         rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16075         if (rc == 0xFFFF) {
16076                 status = -ENXIO;
16077                 goto out;
16078         }
16079
16080         /* Initialize all RQs with associated queue id */
16081         for (idx = 0; idx < numrq; idx++) {
16082                 hrq = hrqp[idx];
16083                 hrq->queue_id = rc + (2 * idx);
16084                 drq = drqp[idx];
16085                 drq->queue_id = rc + (2 * idx) + 1;
16086         }
16087
16088 out:
16089         lpfc_sli4_mbox_cmd_free(phba, mbox);
16090         return status;
16091 }
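
/*
 * Editor's sketch: RQ_CREATE for MRQ returns a single base queue id, and the
 * header/data queues are interleaved from it, exactly as the final loop
 * above computes.  Standalone model of that assignment (names are
 * illustrative):
 */
static void assign_mrq_queue_ids(unsigned int base, unsigned int numrq,
                                 unsigned int *hrq_ids, unsigned int *drq_ids)
{
        unsigned int idx;

        for (idx = 0; idx < numrq; idx++) {
                hrq_ids[idx] = base + (2 * idx);        /* header RQ */
                drq_ids[idx] = base + (2 * idx) + 1;    /* data RQ */
        }
}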
16092
16093 /**
16094  * lpfc_eq_destroy - Destroy an event Queue on the HBA
16095  * @eq: The queue structure associated with the queue to destroy.
16096  *
16097  * This function destroys a queue, as detailed in @eq, by sending a mailbox
16098  * command, specific to the type of queue, to the HBA.
16099  *
16100  * The @eq struct is used to get the queue ID of the queue to destroy.
16101  *
16102  * On success this function will return a zero. If the queue destroy mailbox
16103  * command fails this function will return -ENXIO.
16104  **/
16105 int
16106 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16107 {
16108         LPFC_MBOXQ_t *mbox;
16109         int rc, length, status = 0;
16110         uint32_t shdr_status, shdr_add_status;
16111         union lpfc_sli4_cfg_shdr *shdr;
16112
16113         /* sanity check on queue memory */
16114         if (!eq)
16115                 return -ENODEV;
16116
16117         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16118         if (!mbox)
16119                 return -ENOMEM;
16120         length = (sizeof(struct lpfc_mbx_eq_destroy) -
16121                   sizeof(struct lpfc_sli4_cfg_mhdr));
16122         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16123                          LPFC_MBOX_OPCODE_EQ_DESTROY,
16124                          length, LPFC_SLI4_MBX_EMBED);
16125         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16126                eq->queue_id);
16127         mbox->vport = eq->phba->pport;
16128         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16129
16130         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16131         /* The IOCTL status is embedded in the mailbox subheader. */
16132         shdr = (union lpfc_sli4_cfg_shdr *)
16133                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16134         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16135         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16136         if (shdr_status || shdr_add_status || rc) {
16137                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16138                                 "2505 EQ_DESTROY mailbox failed with "
16139                                 "status x%x add_status x%x, mbx status x%x\n",
16140                                 shdr_status, shdr_add_status, rc);
16141                 status = -ENXIO;
16142         }
16143
16144         /* Remove eq from any list */
16145         list_del_init(&eq->list);
16146         mempool_free(mbox, eq->phba->mbox_mem_pool);
16147         return status;
16148 }
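
/*
 * Editor's note: the *_destroy routines in this stretch all share one shape:
 * allocate a mailbox, build an embedded *_DESTROY command carrying the queue
 * id, issue it with MBX_POLL, then fold the subheader status words and the
 * mailbox return code into -ENXIO.  A hypothetical helper showing the fold
 * (the driver keeps this logic open-coded in each routine):
 */
static inline int queue_destroy_status(uint32_t shdr_status,
                                       uint32_t shdr_add_status, int rc)
{
        /* any nonzero word means the HBA rejected or missed the destroy */
        return (shdr_status || shdr_add_status || rc) ? -ENXIO : 0;
}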
16149
16150 /**
16151  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16152  * @cq: The queue structure associated with the queue to destroy.
16153  *
16154  * This function destroys a queue, as detailed in @cq, by sending a mailbox
16155  * command, specific to the type of queue, to the HBA.
16156  *
16157  * The @cq struct is used to get the queue ID of the queue to destroy.
16158  *
16159  * On success this function will return a zero. If the queue destroy mailbox
16160  * command fails this function will return -ENXIO.
16161  **/
16162 int
16163 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16164 {
16165         LPFC_MBOXQ_t *mbox;
16166         int rc, length, status = 0;
16167         uint32_t shdr_status, shdr_add_status;
16168         union lpfc_sli4_cfg_shdr *shdr;
16169
16170         /* sanity check on queue memory */
16171         if (!cq)
16172                 return -ENODEV;
16173         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16174         if (!mbox)
16175                 return -ENOMEM;
16176         length = (sizeof(struct lpfc_mbx_cq_destroy) -
16177                   sizeof(struct lpfc_sli4_cfg_mhdr));
16178         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16179                          LPFC_MBOX_OPCODE_CQ_DESTROY,
16180                          length, LPFC_SLI4_MBX_EMBED);
16181         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16182                cq->queue_id);
16183         mbox->vport = cq->phba->pport;
16184         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16185         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16186         /* The IOCTL status is embedded in the mailbox subheader. */
16187         shdr = (union lpfc_sli4_cfg_shdr *)
16188                 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16189         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16190         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16191         if (shdr_status || shdr_add_status || rc) {
16192                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16193                                 "2506 CQ_DESTROY mailbox failed with "
16194                                 "status x%x add_status x%x, mbx status x%x\n",
16195                                 shdr_status, shdr_add_status, rc);
16196                 status = -ENXIO;
16197         }
16198         /* Remove cq from any list */
16199         list_del_init(&cq->list);
16200         mempool_free(mbox, cq->phba->mbox_mem_pool);
16201         return status;
16202 }
16203
16204 /**
16205  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16206  * @mq: The queue structure associated with the queue to destroy.
16207  *
16208  * This function destroys a queue, as detailed in @mq, by sending a mailbox
16209  * command, specific to the type of queue, to the HBA.
16210  *
16211  * The @mq struct is used to get the queue ID of the queue to destroy.
16212  *
16213  * On success this function will return a zero. If the queue destroy mailbox
16214  * command fails this function will return -ENXIO.
16215  **/
16216 int
16217 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16218 {
16219         LPFC_MBOXQ_t *mbox;
16220         int rc, length, status = 0;
16221         uint32_t shdr_status, shdr_add_status;
16222         union lpfc_sli4_cfg_shdr *shdr;
16223
16224         /* sanity check on queue memory */
16225         if (!mq)
16226                 return -ENODEV;
16227         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16228         if (!mbox)
16229                 return -ENOMEM;
16230         length = (sizeof(struct lpfc_mbx_mq_destroy) -
16231                   sizeof(struct lpfc_sli4_cfg_mhdr));
16232         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16233                          LPFC_MBOX_OPCODE_MQ_DESTROY,
16234                          length, LPFC_SLI4_MBX_EMBED);
16235         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16236                mq->queue_id);
16237         mbox->vport = mq->phba->pport;
16238         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16239         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16240         /* The IOCTL status is embedded in the mailbox subheader. */
16241         shdr = (union lpfc_sli4_cfg_shdr *)
16242                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16243         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16244         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16245         if (shdr_status || shdr_add_status || rc) {
16246                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16247                                 "2507 MQ_DESTROY mailbox failed with "
16248                                 "status x%x add_status x%x, mbx status x%x\n",
16249                                 shdr_status, shdr_add_status, rc);
16250                 status = -ENXIO;
16251         }
16252         /* Remove mq from any list */
16253         list_del_init(&mq->list);
16254         mempool_free(mbox, mq->phba->mbox_mem_pool);
16255         return status;
16256 }
16257
16258 /**
16259  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16260  * @wq: The queue structure associated with the queue to destroy.
16261  *
16262  * This function destroys a queue, as detailed in @wq, by sending a mailbox
16263  * command, specific to the type of queue, to the HBA.
16264  *
16265  * The @wq struct is used to get the queue ID of the queue to destroy.
16266  *
16267  * On success this function will return a zero. If the queue destroy mailbox
16268  * command fails this function will return -ENXIO.
16269  **/
16270 int
16271 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16272 {
16273         LPFC_MBOXQ_t *mbox;
16274         int rc, length, status = 0;
16275         uint32_t shdr_status, shdr_add_status;
16276         union lpfc_sli4_cfg_shdr *shdr;
16277
16278         /* sanity check on queue memory */
16279         if (!wq)
16280                 return -ENODEV;
16281         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16282         if (!mbox)
16283                 return -ENOMEM;
16284         length = (sizeof(struct lpfc_mbx_wq_destroy) -
16285                   sizeof(struct lpfc_sli4_cfg_mhdr));
16286         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16287                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16288                          length, LPFC_SLI4_MBX_EMBED);
16289         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16290                wq->queue_id);
16291         mbox->vport = wq->phba->pport;
16292         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16293         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16294         shdr = (union lpfc_sli4_cfg_shdr *)
16295                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16296         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16297         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16298         if (shdr_status || shdr_add_status || rc) {
16299                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16300                                 "2508 WQ_DESTROY mailbox failed with "
16301                                 "status x%x add_status x%x, mbx status x%x\n",
16302                                 shdr_status, shdr_add_status, rc);
16303                 status = -ENXIO;
16304         }
16305         /* Remove wq from any list */
16306         list_del_init(&wq->list);
16307         kfree(wq->pring);
16308         wq->pring = NULL;
16309         mempool_free(mbox, wq->phba->mbox_mem_pool);
16310         return status;
16311 }
16312
16313 /**
16314  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16315  * @hrq: The header receive queue structure to destroy.
 * @drq: The data receive queue structure to destroy.
16316  *
16317  * This function destroys the receive queue pair detailed in @hrq and @drq
16318  * by sending a mailbox command, specific to the type of queue, to the HBA.
16319  *
16320  * The @hrq and @drq structs are used to get the queue IDs to destroy.
16321  *
16322  * On success this function will return a zero. If the queue destroy mailbox
16323  * command fails this function will return -ENXIO.
16324  **/
16325 int
16326 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16327                 struct lpfc_queue *drq)
16328 {
16329         LPFC_MBOXQ_t *mbox;
16330         int rc, length, status = 0;
16331         uint32_t shdr_status, shdr_add_status;
16332         union lpfc_sli4_cfg_shdr *shdr;
16333
16334         /* sanity check on queue memory */
16335         if (!hrq || !drq)
16336                 return -ENODEV;
16337         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16338         if (!mbox)
16339                 return -ENOMEM;
16340         length = (sizeof(struct lpfc_mbx_rq_destroy) -
16341                   sizeof(struct lpfc_sli4_cfg_mhdr));
16342         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16343                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16344                          length, LPFC_SLI4_MBX_EMBED);
16345         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16346                hrq->queue_id);
16347         mbox->vport = hrq->phba->pport;
16348         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16349         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16350         /* The IOCTL status is embedded in the mailbox subheader. */
16351         shdr = (union lpfc_sli4_cfg_shdr *)
16352                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16353         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16354         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16355         if (shdr_status || shdr_add_status || rc) {
16356                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16357                                 "2509 RQ_DESTROY mailbox failed with "
16358                                 "status x%x add_status x%x, mbx status x%x\n",
16359                                 shdr_status, shdr_add_status, rc);
16360                 if (rc != MBX_TIMEOUT)
16361                         mempool_free(mbox, hrq->phba->mbox_mem_pool);
16362                 return -ENXIO;
16363         }
16364         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16365                drq->queue_id);
16366         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16367         shdr = (union lpfc_sli4_cfg_shdr *)
16368                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16369         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16370         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16371         if (shdr_status || shdr_add_status || rc) {
16372                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16373                                 "2510 RQ_DESTROY mailbox failed with "
16374                                 "status x%x add_status x%x, mbx status x%x\n",
16375                                 shdr_status, shdr_add_status, rc);
16376                 status = -ENXIO;
16377         }
16378         list_del_init(&hrq->list);
16379         list_del_init(&drq->list);
16380         mempool_free(mbox, hrq->phba->mbox_mem_pool);
16381         return status;
16382 }
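
/*
 * Editor's sketch: lpfc_rq_destroy above reuses one mailbox for two
 * RQ_DESTROY commands and, on the first failure, frees it only when the
 * command did not time out -- on MBX_TIMEOUT the port may still own the
 * mailbox memory.  Hypothetical helper capturing that discipline:
 */
static void free_mbox_unless_pending(struct lpfc_hba *phba,
                                     LPFC_MBOXQ_t *mbox, int rc)
{
        if (rc != MBX_TIMEOUT)          /* safe: port no longer owns it */
                mempool_free(mbox, phba->mbox_mem_pool);
}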
16383
16384 /**
16385  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16386  * @phba: pointer to lpfc hba data structure.
16387  * @pdma_phys_addr0: Physical address of the 1st SGL page.
16388  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16389  * @xritag: the xritag that ties this io to the SGL pages.
16390  *
16391  * This routine will post the sgl pages for the IO that has the xritag
16392  * that is in the iocbq structure. The xritag is assigned during iocbq
16393  * creation and persists for as long as the driver is loaded.
16394  * If the caller has fewer than 256 scatter gather segments to map,
16395  * pdma_phys_addr1 should be 0.
16396  * If the caller needs to map more than 256 scatter gather segments,
16397  * pdma_phys_addr1 should be a valid physical address.
16398  * The physical addresses of the SGL pages must be 64 byte aligned.
16399  * When mapping two SGL pages, the first page must hold 256 entries and
16400  * the second page can hold between 1 and 256 entries.
16401  *
16402  * Return codes:
16403  *      0 - Success
16404  *      -ENXIO, -ENOMEM - Failure
16405  **/
16406 int
16407 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16408                 dma_addr_t pdma_phys_addr0,
16409                 dma_addr_t pdma_phys_addr1,
16410                 uint16_t xritag)
16411 {
16412         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16413         LPFC_MBOXQ_t *mbox;
16414         int rc;
16415         uint32_t shdr_status, shdr_add_status;
16416         uint32_t mbox_tmo;
16417         union lpfc_sli4_cfg_shdr *shdr;
16418
16419         if (xritag == NO_XRI) {
16420                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16421                                 "0364 Invalid param:\n");
16422                 return -EINVAL;
16423         }
16424
16425         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16426         if (!mbox)
16427                 return -ENOMEM;
16428
16429         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16430                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16431                         sizeof(struct lpfc_mbx_post_sgl_pages) -
16432                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16433
16434         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16435                                 &mbox->u.mqe.un.post_sgl_pages;
16436         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16437         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16438
16439         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16440                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16441         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16442                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16443
16444         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16445                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16446         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16447                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16448         if (!phba->sli4_hba.intr_enable)
16449                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16450         else {
16451                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16452                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16453         }
16454         /* The IOCTL status is embedded in the mailbox subheader. */
16455         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16456         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16457         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16458         if (rc != MBX_TIMEOUT)
16459                 mempool_free(mbox, phba->mbox_mem_pool);
16460         if (shdr_status || shdr_add_status || rc) {
16461                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16462                                 "2511 POST_SGL mailbox failed with "
16463                                 "status x%x add_status x%x, mbx status x%x\n",
16464                                 shdr_status, shdr_add_status, rc);
                return -ENXIO;  /* match the documented return codes */
16465         }
16466         return 0;
16467 }
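
/*
 * Editor's usage sketch (hypothetical caller): posting a one-page SGL.
 * Per the comment block above, pdma_phys_addr1 is 0 when 256 or fewer
 * scatter gather entries fit on the first 64-byte-aligned page.
 */
static int post_single_page_sgl(struct lpfc_hba *phba, dma_addr_t sgl_phys,
                                uint16_t xritag)
{
        /* second page address is 0: the whole SGL lives on one page */
        return lpfc_sli4_post_sgl(phba, sgl_phys, 0, xritag);
}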
16468
16469 /**
16470  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16471  * @phba: pointer to lpfc hba data structure.
16472  *
16473  * This routine is invoked to allocate the next unused xri from the
16474  * driver's xri bitmask, consistent with the SLI-4 interface spec.
16475  * The index is logical, so the search starts at bit 0 on every call;
16476  * on success the bit is set and the used-xri count is incremented.
16477  *
16478  * Returns
16479  *      An available xri in the range 0 <= xri < max_xri if successful,
16480  *      NO_XRI if no xris are available.
16481  **/
16482 static uint16_t
16483 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16484 {
16485         unsigned long xri;
16486
16487         /*
16488          * Fetch the next logical xri.  Because this index is logical,
16489          * the driver starts at 0 each time.
16490          */
16491         spin_lock_irq(&phba->hbalock);
16492         xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16493                                  phba->sli4_hba.max_cfg_param.max_xri, 0);
16494         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16495                 spin_unlock_irq(&phba->hbalock);
16496                 return NO_XRI;
16497         } else {
16498                 set_bit(xri, phba->sli4_hba.xri_bmask);
16499                 phba->sli4_hba.max_cfg_param.xri_used++;
16500         }
16501         spin_unlock_irq(&phba->hbalock);
16502         return xri;
16503 }
16504
16505 /**
16506  * __lpfc_sli4_free_xri - Release an xri for reuse.
16507  * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
16508  *
16509  * This routine is invoked to release an xri to the pool of
16510  * available xris maintained by the driver.
16511  **/
16512 static void
16513 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16514 {
16515         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16516                 phba->sli4_hba.max_cfg_param.xri_used--;
16517         }
16518 }
16519
16520 /**
16521  * lpfc_sli4_free_xri - Release an xri for reuse.
16522  * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
16523  *
16524  * This routine is invoked to release an xri to the pool of
16525  * available xris maintained by the driver.
16526  **/
16527 void
16528 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16529 {
16530         spin_lock_irq(&phba->hbalock);
16531         __lpfc_sli4_free_xri(phba, xri);
16532         spin_unlock_irq(&phba->hbalock);
16533 }
16534
16535 /**
16536  * lpfc_sli4_next_xritag - Get an xritag for the io
16537  * @phba: Pointer to HBA context object.
16538  *
16539  * This function gets an xritag for the iocb. The function returns the
16540  * allocated xritag if successful, else it returns NO_XRI (0xffff) when
16541  * no unused xritag is available.
16542  * NO_XRI is not a valid xritag.
16543  * The caller is not required to hold any lock.
16544  **/
16545 uint16_t
16546 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16547 {
16548         uint16_t xri_index;
16549
16550         xri_index = lpfc_sli4_alloc_xri(phba);
16551         if (xri_index == NO_XRI)
16552                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16553                                 "2004 Failed to allocate XRI. Last XRITAG is %d"
16554                                 " Max XRI is %d, Used XRI is %d\n",
16555                                 xri_index,
16556                                 phba->sli4_hba.max_cfg_param.max_xri,
16557                                 phba->sli4_hba.max_cfg_param.xri_used);
16558         return xri_index;
16559 }
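
/*
 * Editor's usage sketch (hypothetical): reserving an xritag for an I/O
 * with the allocator above, to be released via lpfc_sli4_free_xri() if
 * the I/O is abandoned before being issued.
 */
static int reserve_xritag_example(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *iocbq)
{
        uint16_t xri = lpfc_sli4_next_xritag(phba);

        if (xri == NO_XRI)
                return -ENOMEM;         /* xri pool exhausted */
        iocbq->sli4_xritag = xri;       /* ties the io to its SGL pages */
        return 0;
}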
16560
16561 /**
16562  * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16563  * @phba: pointer to lpfc hba data structure.
16564  * @post_sgl_list: pointer to els sgl entry list.
16565  * @post_cnt: number of els sgl entries on the list.
16566  *
16567  * This routine is invoked to post a block of the driver's sgl pages to the
16568  * HBA using a non-embedded mailbox command. No lock is held. This routine
16569  * is only called when the driver is loading and after all IO has been
16570  * stopped.
16571  **/
16572 static int
16573 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16574                             struct list_head *post_sgl_list,
16575                             int post_cnt)
16576 {
16577         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16578         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16579         struct sgl_page_pairs *sgl_pg_pairs;
16580         void *viraddr;
16581         LPFC_MBOXQ_t *mbox;
16582         uint32_t reqlen, alloclen, pg_pairs;
16583         uint32_t mbox_tmo;
16584         uint16_t xritag_start = 0;
16585         int rc = 0;
16586         uint32_t shdr_status, shdr_add_status;
16587         union lpfc_sli4_cfg_shdr *shdr;
16588
16589         reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16590                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16591         if (reqlen > SLI4_PAGE_SIZE) {
16592                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16593                                 "2559 Block sgl registration required DMA "
16594                                 "size (%d) greater than a page\n", reqlen);
16595                 return -ENOMEM;
16596         }
16597
16598         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16599         if (!mbox)
16600                 return -ENOMEM;
16601
16602         /* Allocate DMA memory and set up the non-embedded mailbox command */
16603         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16604                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16605                          LPFC_SLI4_MBX_NEMBED);
16606
16607         if (alloclen < reqlen) {
16608                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16609                                 "0285 Allocated DMA memory size (%d) is "
16610                                 "less than the requested DMA memory "
16611                                 "size (%d)\n", alloclen, reqlen);
16612                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16613                 return -ENOMEM;
16614         }
16615         /* Set up the SGL pages in the non-embedded DMA pages */
16616         viraddr = mbox->sge_array->addr[0];
16617         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16618         sgl_pg_pairs = &sgl->sgl_pg_pairs;
16619
16620         pg_pairs = 0;
16621         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16622                 /* Set up the sge entry */
16623                 sgl_pg_pairs->sgl_pg0_addr_lo =
16624                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16625                 sgl_pg_pairs->sgl_pg0_addr_hi =
16626                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16627                 sgl_pg_pairs->sgl_pg1_addr_lo =
16628                                 cpu_to_le32(putPaddrLow(0));
16629                 sgl_pg_pairs->sgl_pg1_addr_hi =
16630                                 cpu_to_le32(putPaddrHigh(0));
16631
16632                 /* Keep the first xritag on the list */
16633                 if (pg_pairs == 0)
16634                         xritag_start = sglq_entry->sli4_xritag;
16635                 sgl_pg_pairs++;
16636                 pg_pairs++;
16637         }
16638
16639         /* Complete initialization and perform endian conversion. */
16640         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16641         bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16642         sgl->word0 = cpu_to_le32(sgl->word0);
16643
16644         if (!phba->sli4_hba.intr_enable)
16645                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16646         else {
16647                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16648                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16649         }
16650         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16651         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16652         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16653         if (rc != MBX_TIMEOUT)
16654                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16655         if (shdr_status || shdr_add_status || rc) {
16656                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16657                                 "2513 POST_SGL_BLOCK mailbox command failed "
16658                                 "status x%x add_status x%x mbx status x%x\n",
16659                                 shdr_status, shdr_add_status, rc);
16660                 rc = -ENXIO;
16661         }
16662         return rc;
16663 }
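
/*
 * Editor's sketch of the capacity check above: the non-embedded request
 * must fit within one SLI4 page.  Assuming a 4KB page, a 16-byte
 * sgl_page_pairs entry, and roughly a dozen bytes of cfg subheader plus
 * the trailing word (all sizes here are illustrative assumptions), a
 * block caps out around 250 pairs -- the same order of magnitude as
 * LPFC_NEMBED_MBOX_SGL_CNT used later in this file.
 */
static unsigned int max_sgl_pairs_per_mbox_page(void)
{
        const unsigned int page = 4096;         /* assumed SLI4_PAGE_SIZE */
        const unsigned int pair = 16;           /* assumed pair size */
        const unsigned int overhead = 12 + 4;   /* assumed shdr + word */

        return (page - overhead) / pair;
}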
16664
16665 /**
16666  * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
16667  * @phba: pointer to lpfc hba data structure.
16668  * @nblist: pointer to the io buffer list.
16669  * @count: number of io buffers on the list.
16670  *
16671  * This routine is invoked to post a block of @count io buffer sgl pages from
16672  * the buffer list @nblist to the HBA using a non-embedded mailbox command.
16673  * No lock is held.
16674  *
16675  **/
16676 static int
16677 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16678                             int count)
16679 {
16680         struct lpfc_io_buf *lpfc_ncmd;
16681         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16682         struct sgl_page_pairs *sgl_pg_pairs;
16683         void *viraddr;
16684         LPFC_MBOXQ_t *mbox;
16685         uint32_t reqlen, alloclen, pg_pairs;
16686         uint32_t mbox_tmo;
16687         uint16_t xritag_start = 0;
16688         int rc = 0;
16689         uint32_t shdr_status, shdr_add_status;
16690         dma_addr_t pdma_phys_bpl1;
16691         union lpfc_sli4_cfg_shdr *shdr;
16692
16693         /* Calculate the requested length of the dma memory */
16694         reqlen = count * sizeof(struct sgl_page_pairs) +
16695                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16696         if (reqlen > SLI4_PAGE_SIZE) {
16697                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16698                                 "6118 Block sgl registration required DMA "
16699                                 "size (%d) greater than a page\n", reqlen);
16700                 return -ENOMEM;
16701         }
16702         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16703         if (!mbox) {
16704                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16705                                 "6119 Failed to allocate mbox cmd memory\n");
16706                 return -ENOMEM;
16707         }
16708
16709         /* Allocate DMA memory and set up the non-embedded mailbox command */
16710         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16711                                     LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16712                                     reqlen, LPFC_SLI4_MBX_NEMBED);
16713
16714         if (alloclen < reqlen) {
16715                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16716                                 "6120 Allocated DMA memory size (%d) is "
16717                                 "less than the requested DMA memory "
16718                                 "size (%d)\n", alloclen, reqlen);
16719                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16720                 return -ENOMEM;
16721         }
16722
16723         /* Get the first SGE entry from the non-embedded DMA memory */
16724         viraddr = mbox->sge_array->addr[0];
16725
16726         /* Set up the SGL pages in the non-embedded DMA pages */
16727         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16728         sgl_pg_pairs = &sgl->sgl_pg_pairs;
16729
16730         pg_pairs = 0;
16731         list_for_each_entry(lpfc_ncmd, nblist, list) {
16732                 /* Set up the sge entry */
16733                 sgl_pg_pairs->sgl_pg0_addr_lo =
16734                         cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
16735                 sgl_pg_pairs->sgl_pg0_addr_hi =
16736                         cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
16737                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16738                         pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16739                                                 SGL_PAGE_SIZE;
16740                 else
16741                         pdma_phys_bpl1 = 0;
16742                 sgl_pg_pairs->sgl_pg1_addr_lo =
16743                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16744                 sgl_pg_pairs->sgl_pg1_addr_hi =
16745                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16746                 /* Keep the first xritag on the list */
16747                 if (pg_pairs == 0)
16748                         xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16749                 sgl_pg_pairs++;
16750                 pg_pairs++;
16751         }
16752         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16753         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16754         /* Perform endian conversion if necessary */
16755         sgl->word0 = cpu_to_le32(sgl->word0);
16756
16757         if (!phba->sli4_hba.intr_enable) {
16758                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16759         } else {
16760                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16761                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16762         }
16763         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16764         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16765         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16766         if (rc != MBX_TIMEOUT)
16767                 lpfc_sli4_mbox_cmd_free(phba, mbox);
16768         if (shdr_status || shdr_add_status || rc) {
16769                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16770                                 "6125 POST_SGL_BLOCK mailbox command failed "
16771                                 "status x%x add_status x%x mbx status x%x\n",
16772                                 shdr_status, shdr_add_status, rc);
16773                 rc = -ENXIO;
16774         }
16775         return rc;
16776 }
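
/*
 * Editor's sketch of the second-page rule used in the loop above: a buffer
 * gets a second SGL page address only when its per-buffer SGL spills past
 * one SGL_PAGE_SIZE page; otherwise the pair's second address is 0.
 */
static dma_addr_t second_sgl_page_addr(dma_addr_t sgl_phys,
                                       uint32_t sg_dma_buf_size)
{
        if (sg_dma_buf_size > SGL_PAGE_SIZE)
                return sgl_phys + SGL_PAGE_SIZE;
        return 0;       /* SGL fits on the first page */
}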
16777
16778 /**
16779  * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
16780  * @phba: pointer to lpfc hba data structure.
16781  * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers on the list.
16782  *
16783  * This routine walks the list of nvme buffers that was passed in. It attempts
16784  * to construct blocks of nvme buffer sgls that contain contiguous xris and
16785  * uses the non-embedded SGL block post mailbox command to post them to the
16786  * port. Any single NVME buffer sgl with a non-contiguous xri is posted with
16787  * the embedded SGL post mailbox command instead. The @post_nblist passed in
16788  * must be a local list, so no lock is needed while manipulating it.
16789  *
16790  * Returns: 0 = failure, non-zero number of successfully posted buffers.
16791  **/
16792 int
16793 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16794                            struct list_head *post_nblist, int sb_count)
16795 {
16796         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
16797         int status, sgl_size;
16798         int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16799         dma_addr_t pdma_phys_sgl1;
16800         int last_xritag = NO_XRI;
16801         int cur_xritag;
16802         LIST_HEAD(prep_nblist);
16803         LIST_HEAD(blck_nblist);
16804         LIST_HEAD(nvme_nblist);
16805
16806         /* sanity check */
16807         if (sb_count <= 0)
16808                 return -EINVAL;
16809
16810         sgl_size = phba->cfg_sg_dma_buf_size;
16811         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16812                 list_del_init(&lpfc_ncmd->list);
16813                 block_cnt++;
16814                 if ((last_xritag != NO_XRI) &&
16815                     (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16816                         /* a hole in xri block, form a sgl posting block */
16817                         list_splice_init(&prep_nblist, &blck_nblist);
16818                         post_cnt = block_cnt - 1;
16819                         /* prepare list for next posting block */
16820                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16821                         block_cnt = 1;
16822                 } else {
16823                         /* prepare list for next posting block */
16824                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16825                         /* enough sgls for non-embed sgl mbox command */
16826                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16827                                 list_splice_init(&prep_nblist, &blck_nblist);
16828                                 post_cnt = block_cnt;
16829                                 block_cnt = 0;
16830                         }
16831                 }
16832                 num_posting++;
16833                 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16834
16835                 /* end of repost sgl list condition for NVME buffers */
16836                 if (num_posting == sb_count) {
16837                         if (post_cnt == 0) {
16838                                 /* last sgl posting block */
16839                                 list_splice_init(&prep_nblist, &blck_nblist);
16840                                 post_cnt = block_cnt;
16841                         } else if (block_cnt == 1) {
16842                                 /* last single sgl with non-contiguous xri */
16843                                 if (sgl_size > SGL_PAGE_SIZE)
16844                                         pdma_phys_sgl1 =
16845                                                 lpfc_ncmd->dma_phys_sgl +
16846                                                 SGL_PAGE_SIZE;
16847                                 else
16848                                         pdma_phys_sgl1 = 0;
16849                                 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16850                                 status = lpfc_sli4_post_sgl(
16851                                                 phba, lpfc_ncmd->dma_phys_sgl,
16852                                                 pdma_phys_sgl1, cur_xritag);
16853                                 if (status) {
16854                                         /* Post error.  Buffer unavailable. */
16855                                         lpfc_ncmd->flags |=
16856                                                 LPFC_SBUF_NOT_POSTED;
16857                                 } else {
16858                                         /* Post success. Buffer available. */
16859                                         lpfc_ncmd->flags &=
16860                                                 ~LPFC_SBUF_NOT_POSTED;
16861                                         lpfc_ncmd->status = IOSTAT_SUCCESS;
16862                                         num_posted++;
16863                                 }
16864                                 /* success, put on NVME buffer sgl list */
16865                                 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16866                         }
16867                 }
16868
16869                 /* continue until a nembed page worth of sgls */
16870                 if (post_cnt == 0)
16871                         continue;
16872
16873                 /* post block of NVME buffer list sgls */
16874                 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
16875                                                      post_cnt);
16876
16877                 /* don't reset xritag due to hole in xri block */
16878                 if (block_cnt == 0)
16879                         last_xritag = NO_XRI;
16880
16881                 /* reset NVME buffer post count for next round of posting */
16882                 post_cnt = 0;
16883
16884                 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
16885                 while (!list_empty(&blck_nblist)) {
16886                         list_remove_head(&blck_nblist, lpfc_ncmd,
16887                                          struct lpfc_io_buf, list);
16888                         if (status) {
16889                                 /* Post error.  Mark buffer unavailable. */
16890                                 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
16891                         } else {
16892                                 /* Post success, Mark buffer available. */
16893                                 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
16894                                 lpfc_ncmd->status = IOSTAT_SUCCESS;
16895                                 num_posted++;
16896                         }
16897                         list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16898                 }
16899         }
16900         /* Push NVME buffers with sgl posted to the available list */
16901         lpfc_io_buf_replenish(phba, &nvme_nblist);
16902
16903         return num_posted;
16904 }
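
/*
 * Editor's standalone model of the blocking rule above: buffers are grouped
 * while their xritags stay consecutive; a hole in the xri range (or a full
 * LPFC_NEMBED_MBOX_SGL_CNT group) flushes the current block to the
 * non-embedded post.  Plain-C sketch with an int array standing in for the
 * buffer list:
 */
static int first_postable_block_len(const int *xris, int n, int max_block)
{
        int i;

        for (i = 1; i < n && i < max_block; i++)
                if (xris[i] != xris[i - 1] + 1)
                        break;          /* hole in the xri range */
        return i;                       /* buffers postable in one block */
}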
16905
16906 /**
16907  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16908  * @phba: pointer to lpfc_hba struct that the frame was received on
16909  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16910  *
16911  * This function checks the fields in the @fc_hdr to see if the FC frame is a
16912  * valid type of frame that the LPFC driver will handle. This function will
16913  * return a zero if the frame is a valid frame or a non zero value when the
16914  * frame does not pass the check.
16915  **/
16916 static int
16917 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16918 {
16920         struct fc_vft_header *fc_vft_hdr;
16921         uint32_t *header = (uint32_t *) fc_hdr;
16922
16923 #define FC_RCTL_MDS_DIAGS       0xF4
16924
16925         switch (fc_hdr->fh_r_ctl) {
16926         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
16927         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
16928         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
16929         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
16930         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
16931         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
16932         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
16933         case FC_RCTL_DD_CMD_STATUS:     /* command status */
16934         case FC_RCTL_ELS_REQ:   /* extended link services request */
16935         case FC_RCTL_ELS_REP:   /* extended link services reply */
16936         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
16937         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
16938         case FC_RCTL_BA_NOP:    /* basic link service NOP */
16939         case FC_RCTL_BA_ABTS:   /* basic link service abort */
16940         case FC_RCTL_BA_RMC:    /* remove connection */
16941         case FC_RCTL_BA_ACC:    /* basic accept */
16942         case FC_RCTL_BA_RJT:    /* basic reject */
16943         case FC_RCTL_BA_PRMT:
16944         case FC_RCTL_ACK_1:     /* acknowledge_1 */
16945         case FC_RCTL_ACK_0:     /* acknowledge_0 */
16946         case FC_RCTL_P_RJT:     /* port reject */
16947         case FC_RCTL_F_RJT:     /* fabric reject */
16948         case FC_RCTL_P_BSY:     /* port busy */
16949         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
16950         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
16951         case FC_RCTL_LCR:       /* link credit reset */
16952         case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
16953         case FC_RCTL_END:       /* end */
16954                 break;
16955         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
16956                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16957                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16958                 return lpfc_fc_frame_check(phba, fc_hdr);
16959         default:
16960                 goto drop;
16961         }
16962
16963         switch (fc_hdr->fh_type) {
16964         case FC_TYPE_BLS:
16965         case FC_TYPE_ELS:
16966         case FC_TYPE_FCP:
16967         case FC_TYPE_CT:
16968         case FC_TYPE_NVME:
16969                 break;
16970         case FC_TYPE_IP:
16971         case FC_TYPE_ILS:
16972         default:
16973                 goto drop;
16974         }
16975
16976         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
16977                         "2538 Received frame rctl:x%x, type:x%x, "
16978                         "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
16979                         fc_hdr->fh_r_ctl, fc_hdr->fh_type,
16980                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16981                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
16982                         be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16983                         be32_to_cpu(header[6]));
16984         return 0;
16985 drop:
16986         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
16987                         "2539 Dropped frame rctl:x%x type:x%x\n",
16988                         fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16989         return 1;
16990 }
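
/*
 * Editor's sketch: a VFT-tagged frame carries the real FC header
 * immediately after the tagging header, which is why the FC_RCTL_VFTH
 * case above re-runs the check on the header located one
 * fc_frame_header past the start of the frame.
 */
static struct fc_frame_header *vft_inner_fc_hdr(struct fc_vft_header *fc_vft_hdr)
{
        /* the encapsulated header starts right after the VFT header */
        return &((struct fc_frame_header *)fc_vft_hdr)[1];
}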
16991
16992 /**
16993  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16994  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16995  *
16996  * This function processes the FC header to retrieve the VFI from the VF
16997  * header, if one exists. It returns the VFI when a VF tagging header is
16998  * present, or 0 when none exists.
16999  **/
17000 static uint32_t
17001 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17002 {
17003         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17004
17005         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17006                 return 0;
17007         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17008 }
17009
17010 /**
17011  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17012  * @phba: Pointer to the HBA structure to search for the vport on
17013  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17014  * @fcfi: The FC Fabric ID that the frame came from
17015  *
17016  * This function searches the @phba for a vport that matches the content of the
17017  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17018  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17019  * returns the matching vport pointer or NULL if unable to match frame to a
17020  * vport.
17021  **/
17022 static struct lpfc_vport *
17023 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17024                        uint16_t fcfi, uint32_t did)
17025 {
17026         struct lpfc_vport **vports;
17027         struct lpfc_vport *vport = NULL;
17028         int i;
17029
17030         if (did == Fabric_DID)
17031                 return phba->pport;
17032         if ((phba->pport->fc_flag & FC_PT2PT) &&
17033                 !(phba->link_state == LPFC_HBA_READY))
17034                 return phba->pport;
17035
17036         vports = lpfc_create_vport_work_array(phba);
17037         if (vports != NULL) {
17038                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17039                         if (phba->fcf.fcfi == fcfi &&
17040                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17041                             vports[i]->fc_myDID == did) {
17042                                 vport = vports[i];
17043                                 break;
17044                         }
17045                 }
17046         }
17047         lpfc_destroy_vport_work_array(phba, vports);
17048         return vport;
17049 }
17050
17051 /**
17052  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17053  * @vport: The vport to work on.
17054  *
17055  * This function updates the receive sequence time stamp for this vport. The
17056  * receive sequence time stamp indicates the time that the last frame of
17057  * the sequence that has been idle for the longest amount of time was received.
17058  * The driver uses this time stamp to determine if any received sequences have
17059  * timed out.
17060  **/
17061 static void
17062 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17063 {
17064         struct lpfc_dmabuf *h_buf;
17065         struct hbq_dmabuf *dmabuf = NULL;
17066
17067         /* get the oldest sequence on the rcv list */
17068         h_buf = list_get_first(&vport->rcv_buffer_list,
17069                                struct lpfc_dmabuf, list);
17070         if (!h_buf)
17071                 return;
17072         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17073         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17074 }
17075
17076 /**
17077  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17078  * @vport: The vport that the received sequences were sent to.
17079  *
17080  * This function cleans up all outstanding received sequences. This is called
17081  * by the driver when a link event or user action invalidates all the received
17082  * sequences.
17083  **/
17084 void
17085 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17086 {
17087         struct lpfc_dmabuf *h_buf, *hnext;
17088         struct lpfc_dmabuf *d_buf, *dnext;
17089         struct hbq_dmabuf *dmabuf = NULL;
17090
17091         /* start with the oldest sequence on the rcv list */
17092         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17093                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17094                 list_del_init(&dmabuf->hbuf.list);
17095                 list_for_each_entry_safe(d_buf, dnext,
17096                                          &dmabuf->dbuf.list, list) {
17097                         list_del_init(&d_buf->list);
17098                         lpfc_in_buf_free(vport->phba, d_buf);
17099                 }
17100                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17101         }
17102 }
17103
17104 /**
17105  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17106  * @vport: The vport that the received sequences were sent to.
17107  *
17108  * This function determines whether any received sequences have timed out by
17109  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17110  * indicates that there is at least one timed out sequence this routine will
17111  * go through the received sequences one at a time from most inactive to most
17112  * active to determine which ones need to be cleaned up. Once it has determined
17113  * that a sequence needs to be cleaned up it will simply free up the resources
17114  * without sending an abort.
17115  **/
17116 void
17117 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17118 {
17119         struct lpfc_dmabuf *h_buf, *hnext;
17120         struct lpfc_dmabuf *d_buf, *dnext;
17121         struct hbq_dmabuf *dmabuf = NULL;
17122         unsigned long timeout;
17123         int abort_count = 0;
17124
17125         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17126                    vport->rcv_buffer_time_stamp);
17127         if (list_empty(&vport->rcv_buffer_list) ||
17128             time_before(jiffies, timeout))
17129                 return;
17130         /* start with the oldest sequence on the rcv list */
17131         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17132                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17133                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17134                            dmabuf->time_stamp);
17135                 if (time_before(jiffies, timeout))
17136                         break;
17137                 abort_count++;
17138                 list_del_init(&dmabuf->hbuf.list);
17139                 list_for_each_entry_safe(d_buf, dnext,
17140                                          &dmabuf->dbuf.list, list) {
17141                         list_del_init(&d_buf->list);
17142                         lpfc_in_buf_free(vport->phba, d_buf);
17143                 }
17144                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17145         }
17146         if (abort_count)
17147                 lpfc_update_rcv_time_stamp(vport);
17148 }
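/*
 * Editor's note: an illustrative sketch, not part of the driver. The timeout
 * test above is the standard jiffies idiom: a deadline is formed by adding
 * the E_D_TOV (milliseconds, converted with msecs_to_jiffies()) to the stamp
 * recorded at receive time, and time_before() compares against the current
 * jiffies safely across counter wraparound:
 *
 *	unsigned long deadline = dmabuf->time_stamp +
 *				 msecs_to_jiffies(vport->phba->fc_edtov);
 *
 *	if (time_before(jiffies, deadline))
 *		return;		// sequence still within E_D_TOV
 */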
17149
17150 /**
17151  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17152  * @vport: The vport on which the frame was received
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17153  *
17154  * This function searches through the existing incomplete sequences that have
17155  * been sent to this @vport. If the frame matches one of the incomplete
17156  * sequences then the dbuf in the @dmabuf is added to the list of frames that
17157  * make up that sequence. If no sequence is found that matches this frame then
17158  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17159  * This function returns a pointer to the first dmabuf in the sequence list that
17160  * the frame was linked to.
17161  **/
17162 static struct hbq_dmabuf *
17163 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17164 {
17165         struct fc_frame_header *new_hdr;
17166         struct fc_frame_header *temp_hdr;
17167         struct lpfc_dmabuf *d_buf;
17168         struct lpfc_dmabuf *h_buf;
17169         struct hbq_dmabuf *seq_dmabuf = NULL;
17170         struct hbq_dmabuf *temp_dmabuf = NULL;
17171         uint8_t found = 0;
17172
17173         INIT_LIST_HEAD(&dmabuf->dbuf.list);
17174         dmabuf->time_stamp = jiffies;
17175         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17176
17177         /* Use the hdr_buf to find the sequence that this frame belongs to */
17178         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17179                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17180                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17181                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17182                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17183                         continue;
17184                 /* found a pending sequence that matches this frame */
17185                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17186                 break;
17187         }
17188         if (!seq_dmabuf) {
17189                 /*
17190                  * This indicates first frame received for this sequence.
17191                  * Queue the buffer on the vport's rcv_buffer_list.
17192                  */
17193                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17194                 lpfc_update_rcv_time_stamp(vport);
17195                 return dmabuf;
17196         }
17197         temp_hdr = seq_dmabuf->hbuf.virt;
17198         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17199                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17200                 list_del_init(&seq_dmabuf->hbuf.list);
17201                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17202                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17203                 lpfc_update_rcv_time_stamp(vport);
17204                 return dmabuf;
17205         }
17206         /* move this sequence to the tail to indicate a young sequence */
17207         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17208         seq_dmabuf->time_stamp = jiffies;
17209         lpfc_update_rcv_time_stamp(vport);
17210         if (list_empty(&seq_dmabuf->dbuf.list)) {
17211                 temp_hdr = dmabuf->hbuf.virt;
17212                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17213                 return seq_dmabuf;
17214         }
17215         /* find the correct place in the sequence to insert this frame */
17216         d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17217         while (!found) {
17218                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17219                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17220                 /*
17221                  * If the frame's sequence count is greater than the frame on
17222                  * the list then insert the frame right after this frame
17223                  */
17224                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17225                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17226                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17227                         found = 1;
17228                         break;
17229                 }
17230
17231                 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17232                         break;
17233                 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17234         }
17235
17236         if (found)
17237                 return seq_dmabuf;
17238         return NULL;
17239 }
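/*
 * Editor's note: a sketch of the data layout assumed above, not driver code.
 * Each pending sequence is represented by its first frame's hbq_dmabuf; the
 * hbuf side links sequences on vport->rcv_buffer_list, while the dbuf side
 * chains the remaining frames of that one sequence. container_of() recovers
 * the enclosing hbq_dmabuf from a list node:
 *
 *	struct lpfc_dmabuf *d_buf;
 *	struct hbq_dmabuf *frame;
 *
 *	list_for_each_entry(d_buf, &seq_dmabuf->dbuf.list, list) {
 *		frame = container_of(d_buf, struct hbq_dmabuf, dbuf);
 *		// frame->hbuf.virt points at this frame's FC header
 *	}
 */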
17240
17241 /**
17242  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17243  * @vport: pointer to a virtual port
17244  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17245  *
17246  * This function tries to abort the partially assembled sequence described
17247  * by the information in the basic abort @dmabuf. It checks to see whether such
17248  * a partially assembled sequence is held by the driver. If so, it shall free up
17249  * all the frames from the partially assembled sequence.
17250  *
17251  * Return
17252  * true  -- if a matching partially assembled sequence was present and all
17253  *          of its frames were freed;
17254  * false -- if no matching partially assembled sequence was present, so
17255  *          nothing was aborted in the lower layer driver
17256  **/
17257 static bool
17258 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17259                             struct hbq_dmabuf *dmabuf)
17260 {
17261         struct fc_frame_header *new_hdr;
17262         struct fc_frame_header *temp_hdr;
17263         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17264         struct hbq_dmabuf *seq_dmabuf = NULL;
17265
17266         /* Use the hdr_buf to find the sequence that matches this frame */
17267         INIT_LIST_HEAD(&dmabuf->dbuf.list);
17268         INIT_LIST_HEAD(&dmabuf->hbuf.list);
17269         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17270         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17271                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17272                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17273                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17274                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17275                         continue;
17276                 /* found a pending sequence that matches this frame */
17277                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17278                 break;
17279         }
17280
17281         /* Free up all the frames from the partially assembled sequence */
17282         if (seq_dmabuf) {
17283                 list_for_each_entry_safe(d_buf, n_buf,
17284                                          &seq_dmabuf->dbuf.list, list) {
17285                         list_del_init(&d_buf->list);
17286                         lpfc_in_buf_free(vport->phba, d_buf);
17287                 }
17288                 return true;
17289         }
17290         return false;
17291 }
17292
17293 /**
17294  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17295  * @vport: pointer to a virtual port
17296  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17297  *
17298  * This function tries to abort the assembled sequence at the upper level
17299  * protocol, described by the information in the basic abort @dmabuf. It
17300  * checks to see whether such a pending context exists at the upper level
17301  * protocol. If so, it shall clean up the pending context.
17302  *
17303  * Return
17304  * true  -- if a matching pending context for the sequence was cleaned up
17305  *          at the ULP;
17306  * false -- if no matching pending context for the sequence was present
17307  *          at the ULP.
17308  **/
17309 static bool
17310 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17311 {
17312         struct lpfc_hba *phba = vport->phba;
17313         int handled;
17314
17315         /* Accepting abort at ulp with SLI4 only */
17316         if (phba->sli_rev < LPFC_SLI_REV4)
17317                 return false;
17318
17319         /* Give interested upper level protocols a chance to handle the abort */
17320         handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17321         if (handled)
17322                 return true;
17323
17324         return false;
17325 }
17326
17327 /**
17328  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17329  * @phba: Pointer to HBA context object.
17330  * @cmd_iocbq: pointer to the command iocbq structure.
17331  * @rsp_iocbq: pointer to the response iocbq structure.
17332  *
17333  * This function handles the sequence abort response iocb command complete
17334  * event. It properly releases the memory allocated to the sequence abort
17335  * accept iocb.
17336  **/
17337 static void
17338 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17339                              struct lpfc_iocbq *cmd_iocbq,
17340                              struct lpfc_iocbq *rsp_iocbq)
17341 {
17342         struct lpfc_nodelist *ndlp;
17343
17344         if (cmd_iocbq) {
17345                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17346                 lpfc_nlp_put(ndlp);
17347                 lpfc_nlp_not_used(ndlp);
17348                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17349         }
17350
17351         /* Failure means BLS ABORT RSP did not get delivered to remote node */
17352         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17353                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17354                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
17355                         rsp_iocbq->iocb.ulpStatus,
17356                         rsp_iocbq->iocb.un.ulpWord[4]);
17357 }
17358
17359 /**
17360  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17361  * @phba: Pointer to HBA context object.
17362  * @xri: xri id in transaction.
17363  *
17364  * This function validates that the xri maps to the known range of XRIs
17365  * allocated and used by the driver, returning the index into the xri_ids
 * table on success or NO_XRI if the xri is out of range.
17366  **/
17367 uint16_t
17368 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17369                       uint16_t xri)
17370 {
17371         uint16_t i;
17372
17373         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17374                 if (xri == phba->sli4_hba.xri_ids[i])
17375                         return i;
17376         }
17377         return NO_XRI;
17378 }
17379
17380 /**
17381  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17382  * @vport: Pointer to the vport on which the abort was received.
17383  * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the partially assembled receive sequence was aborted
17384  *
17385  * This function sends a basic response to a previous unsol sequence abort
17386  * event after aborting the sequence handling.
17387  **/
17388 void
17389 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17390                         struct fc_frame_header *fc_hdr, bool aborted)
17391 {
17392         struct lpfc_hba *phba = vport->phba;
17393         struct lpfc_iocbq *ctiocb = NULL;
17394         struct lpfc_nodelist *ndlp;
17395         uint16_t oxid, rxid, xri, lxri;
17396         uint32_t sid, fctl;
17397         IOCB_t *icmd;
17398         int rc;
17399
17400         if (!lpfc_is_link_up(phba))
17401                 return;
17402
17403         sid = sli4_sid_from_fc_hdr(fc_hdr);
17404         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17405         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17406
17407         ndlp = lpfc_findnode_did(vport, sid);
17408         if (!ndlp) {
17409                 ndlp = lpfc_nlp_init(vport, sid);
17410                 if (!ndlp) {
17411                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17412                                          "1268 Failed to allocate ndlp for "
17413                                          "oxid:x%x SID:x%x\n", oxid, sid);
17414                         return;
17415                 }
17416                 /* Put ndlp onto pport node list */
17417                 lpfc_enqueue_node(vport, ndlp);
17418         } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17419                 /* re-setup ndlp without removing from node list */
17420                 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17421                 if (!ndlp) {
17422                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17423                                          "3275 Failed to activate ndlp found "
17424                                          "for oxid:x%x SID:x%x\n", oxid, sid);
17425                         return;
17426                 }
17427         }
17428
17429         /* Allocate buffer for rsp iocb */
17430         ctiocb = lpfc_sli_get_iocbq(phba);
17431         if (!ctiocb)
17432                 return;
17433
17434         /* Extract the F_CTL field from FC_HDR */
17435         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17436
17437         icmd = &ctiocb->iocb;
17438         icmd->un.xseq64.bdl.bdeSize = 0;
17439         icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17440         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17441         icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17442         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17443
17444         /* Fill in the rest of iocb fields */
17445         icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17446         icmd->ulpBdeCount = 0;
17447         icmd->ulpLe = 1;
17448         icmd->ulpClass = CLASS3;
17449         icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17450         ctiocb->context1 = lpfc_nlp_get(ndlp);
17451
17452         ctiocb->vport = phba->pport;
17453         ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17454         ctiocb->sli4_lxritag = NO_XRI;
17455         ctiocb->sli4_xritag = NO_XRI;
17456
17457         if (fctl & FC_FC_EX_CTX)
17458                 /* Exchange responder sent the abort so we
17459                  * own the oxid.
17460                  */
17461                 xri = oxid;
17462         else
17463                 xri = rxid;
17464         lxri = lpfc_sli4_xri_inrange(phba, xri);
17465         if (lxri != NO_XRI)
17466                 lpfc_set_rrq_active(phba, ndlp, lxri,
17467                         (xri == oxid) ? rxid : oxid, 0);
17468         /* For BA_ABTS from exchange responder, if the logical xri with
17469          * the oxid maps to the FCP XRI range, the port no longer has
17470          * that exchange context, send a BLS_RJT. Override the IOCB for
17471          * a BA_RJT.
17472          */
17473         if ((fctl & FC_FC_EX_CTX) &&
17474             (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17475                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17476                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17477                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17478                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17479         }
17480
17481         /* If BA_ABTS failed to abort a partially assembled receive sequence,
17482          * the driver no longer has that exchange, send a BLS_RJT. Override
17483          * the IOCB for a BA_RJT.
17484          */
17485         if (!aborted) {
17486                 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17487                 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17488                 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17489                 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17490         }
17491
17492         if (fctl & FC_FC_EX_CTX) {
17493                 /* ABTS sent by responder to CT exchange, construction
17494                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17495                  * field and RX_ID from ABTS for RX_ID field.
17496                  */
17497                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17498         } else {
17499                 /* ABTS sent by initiator to CT exchange, construction
17500                  * of BA_ACC will need to allocate a new XRI for the
17501                  * XRI_TAG field.
17502                  */
17503                 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17504         }
17505         bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17506         bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17507
17508         /* Xmit CT abts response on exchange <xid> */
17509         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17510                          "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17511                          icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17512
17513         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17514         if (rc == IOCB_ERROR) {
17515                 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17516                                  "2925 Failed to issue CT ABTS RSP x%x on "
17517                                  "xri x%x, Data x%x\n",
17518                                  icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17519                                  phba->link_state);
17520                 lpfc_nlp_put(ndlp);
17521                 ctiocb->context1 = NULL;
17522                 lpfc_sli_release_iocbq(phba, ctiocb);
17523         }
17524 }
17525
17526 /**
17527  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17528  * @vport: Pointer to the vport on which this sequence was received
17529  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17530  *
17531  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17532  * receive sequence is only partially assembled by the driver, it shall abort
17533  * the partially assembled frames for the sequence. Otherwise, if the
17534  * unsolicited receive sequence has been completely assembled and passed to
17535  * the Upper Layer Protocol (ULP), it marks the per-oxid status to indicate
17536  * that the unsolicited sequence has been aborted. After that, it will issue a
17537  * basic accept to accept the abort.
17538  **/
17539 static void
17540 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17541                              struct hbq_dmabuf *dmabuf)
17542 {
17543         struct lpfc_hba *phba = vport->phba;
17544         struct fc_frame_header fc_hdr;
17545         uint32_t fctl;
17546         bool aborted;
17547
17548         /* Make a copy of fc_hdr before the dmabuf being released */
17549         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17550         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17551
17552         if (fctl & FC_FC_EX_CTX) {
17553                 /* ABTS by responder to exchange, no cleanup needed */
17554                 aborted = true;
17555         } else {
17556                 /* ABTS by initiator to exchange, need to do cleanup */
17557                 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17558                 if (!aborted)
17559                         aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17560         }
17561         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17562
17563         if (phba->nvmet_support) {
17564                 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17565                 return;
17566         }
17567
17568         /* Respond with BA_ACC or BA_RJT accordingly */
17569         lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17570 }
17571
17572 /**
17573  * lpfc_seq_complete - Indicates if a sequence is complete
17574  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17575  *
17576  * This function checks the sequence, starting with the frame described by
17577  * @dmabuf, to see if all the frames associated with this sequence are present.
17578  * The frames associated with this sequence are linked to the @dmabuf using the
17579  * dbuf list. This function looks for three major things: 1) that the first
17580  * frame has a sequence count of zero; 2) that there is a frame with the
17581  * last-frame-of-sequence bit set; and 3) that there are no holes in the
17582  * sequence count. The function returns 1 when the sequence is complete,
 * otherwise it returns 0.
17583  **/
17584 static int
17585 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17586 {
17587         struct fc_frame_header *hdr;
17588         struct lpfc_dmabuf *d_buf;
17589         struct hbq_dmabuf *seq_dmabuf;
17590         uint32_t fctl;
17591         int seq_count = 0;
17592
17593         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17594         /* make sure first frame of sequence has a sequence count of zero */
17595         if (hdr->fh_seq_cnt != seq_count)
17596                 return 0;
17597         fctl = (hdr->fh_f_ctl[0] << 16 |
17598                 hdr->fh_f_ctl[1] << 8 |
17599                 hdr->fh_f_ctl[2]);
17600         /* If last frame of sequence we can return success. */
17601         if (fctl & FC_FC_END_SEQ)
17602                 return 1;
17603         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17604                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17605                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17606                 /* If there is a hole in the sequence count then fail. */
17607                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17608                         return 0;
17609                 fctl = (hdr->fh_f_ctl[0] << 16 |
17610                         hdr->fh_f_ctl[1] << 8 |
17611                         hdr->fh_f_ctl[2]);
17612                 /* If last frame of sequence we can return success. */
17613                 if (fctl & FC_FC_END_SEQ)
17614                         return 1;
17615         }
17616         return 0;
17617 }
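/*
 * Editor's note: a small sketch of the F_CTL handling above, not driver
 * code. The 24-bit F_CTL field spans three header bytes and is reassembled
 * before testing the end-of-sequence bit (FC_FC_END_SEQ, from
 * <scsi/fc/fc_fs.h>):
 *
 *	uint32_t fctl = (hdr->fh_f_ctl[0] << 16) |
 *			(hdr->fh_f_ctl[1] << 8) |
 *			 hdr->fh_f_ctl[2];
 *	int last_frame = (fctl & FC_FC_END_SEQ) != 0;
 */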
17618
17619 /**
17620  * lpfc_prep_seq - Prep sequence for ULP processing
17621  * @vport: Pointer to the vport on which this sequence was received
17622  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
17623  *
17624  * This function takes a sequence, described by a list of frames, and creates
17625  * a list of iocbq structures to describe the sequence. This iocbq list will be
17626  * used to issue to the generic unsolicited sequence handler. This routine
17627  * returns a pointer to the first iocbq in the list. If the function is unable
17628  * to allocate an iocbq then it throws out the received frames that were not
17629  * able to be described and returns a pointer to the first iocbq. If unable to
17630  * allocate any iocbqs (including the first) this function will return NULL.
17631  **/
17632 static struct lpfc_iocbq *
17633 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17634 {
17635         struct hbq_dmabuf *hbq_buf;
17636         struct lpfc_dmabuf *d_buf, *n_buf;
17637         struct lpfc_iocbq *first_iocbq, *iocbq;
17638         struct fc_frame_header *fc_hdr;
17639         uint32_t sid;
17640         uint32_t len, tot_len;
17641         struct ulp_bde64 *pbde;
17642
17643         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17644         /* remove from receive buffer list */
17645         list_del_init(&seq_dmabuf->hbuf.list);
17646         lpfc_update_rcv_time_stamp(vport);
17647         /* get the Remote Port's SID */
17648         sid = sli4_sid_from_fc_hdr(fc_hdr);
17649         tot_len = 0;
17650         /* Get an iocbq struct to fill in. */
17651         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17652         if (first_iocbq) {
17653                 /* Initialize the first IOCB. */
17654                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17655                 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17656                 first_iocbq->vport = vport;
17657
17658                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17659                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17660                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17661                         first_iocbq->iocb.un.rcvels.parmRo =
17662                                 sli4_did_from_fc_hdr(fc_hdr);
17663                         first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17664                 } else
17665                         first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17666                 first_iocbq->iocb.ulpContext = NO_XRI;
17667                 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17668                         be16_to_cpu(fc_hdr->fh_ox_id);
17669                 /* iocbq is prepped for internal consumption.  Physical vpi. */
17670                 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17671                         vport->phba->vpi_ids[vport->vpi];
17672                 /* put the first buffer into the first IOCBq */
17673                 tot_len = bf_get(lpfc_rcqe_length,
17674                                        &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17675
17676                 first_iocbq->context2 = &seq_dmabuf->dbuf;
17677                 first_iocbq->context3 = NULL;
17678                 first_iocbq->iocb.ulpBdeCount = 1;
17679                 if (tot_len > LPFC_DATA_BUF_SIZE)
17680                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17681                                                         LPFC_DATA_BUF_SIZE;
17682                 else
17683                         first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17684
17685                 first_iocbq->iocb.un.rcvels.remoteID = sid;
17686
17687                 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17688         }
17689         iocbq = first_iocbq;
17690         /*
17691          * Each IOCBq can have two Buffers assigned, so go through the list
17692          * of buffers for this sequence and save two buffers in each IOCBq
17693          */
17694         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17695                 if (!iocbq) {
17696                         lpfc_in_buf_free(vport->phba, d_buf);
17697                         continue;
17698                 }
17699                 if (!iocbq->context3) {
17700                         iocbq->context3 = d_buf;
17701                         iocbq->iocb.ulpBdeCount++;
17702                         /* We need to get the size out of the right CQE */
17703                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17704                         len = bf_get(lpfc_rcqe_length,
17705                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
17706                         pbde = (struct ulp_bde64 *)
17707                                         &iocbq->iocb.unsli3.sli3Words[4];
17708                         if (len > LPFC_DATA_BUF_SIZE)
17709                                 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17710                         else
17711                                 pbde->tus.f.bdeSize = len;
17712
17713                         iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17714                         tot_len += len;
17715                 } else {
17716                         iocbq = lpfc_sli_get_iocbq(vport->phba);
17717                         if (!iocbq) {
17718                                 if (first_iocbq) {
17719                                         first_iocbq->iocb.ulpStatus =
17720                                                         IOSTAT_FCP_RSP_ERROR;
17721                                         first_iocbq->iocb.un.ulpWord[4] =
17722                                                         IOERR_NO_RESOURCES;
17723                                 }
17724                                 lpfc_in_buf_free(vport->phba, d_buf);
17725                                 continue;
17726                         }
17727                         /* We need to get the size out of the right CQE */
17728                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17729                         len = bf_get(lpfc_rcqe_length,
17730                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
17731                         iocbq->context2 = d_buf;
17732                         iocbq->context3 = NULL;
17733                         iocbq->iocb.ulpBdeCount = 1;
17734                         if (len > LPFC_DATA_BUF_SIZE)
17735                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17736                                                         LPFC_DATA_BUF_SIZE;
17737                         else
17738                                 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17739
17740                         tot_len += len;
17741                         iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17742
17743                         iocbq->iocb.un.rcvels.remoteID = sid;
17744                         list_add_tail(&iocbq->list, &first_iocbq->list);
17745                 }
17746         }
17747         return first_iocbq;
17748 }
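/*
 * Editor's note: an illustrative sketch, not driver code. Each iocbq built
 * above carries at most two buffers (context2 and context3), so a sequence
 * of N frames maps to roughly N/2 iocbqs chained on first_iocbq->list.
 * Counting the entries of a prepared sequence:
 *
 *	struct lpfc_iocbq *iocb;
 *	int nr_iocbs = 1;	// first_iocbq itself
 *
 *	list_for_each_entry(iocb, &first_iocbq->list, list)
 *		nr_iocbs++;
 */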
17749
17750 static void
17751 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17752                           struct hbq_dmabuf *seq_dmabuf)
17753 {
17754         struct fc_frame_header *fc_hdr;
17755         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17756         struct lpfc_hba *phba = vport->phba;
17757
17758         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17759         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17760         if (!iocbq) {
17761                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17762                                 "2707 Ring %d handler: Failed to allocate "
17763                                 "iocb Rctl x%x Type x%x received\n",
17764                                 LPFC_ELS_RING,
17765                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17766                 return;
17767         }
17768         if (!lpfc_complete_unsol_iocb(phba,
17769                                       phba->sli4_hba.els_wq->pring,
17770                                       iocbq, fc_hdr->fh_r_ctl,
17771                                       fc_hdr->fh_type))
17772                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17773                                 "2540 Ring %d handler: unexpected Rctl "
17774                                 "x%x Type x%x received\n",
17775                                 LPFC_ELS_RING,
17776                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17777
17778         /* Free iocb created in lpfc_prep_seq */
17779         list_for_each_entry_safe(curr_iocb, next_iocb,
17780                 &iocbq->list, list) {
17781                 list_del_init(&curr_iocb->list);
17782                 lpfc_sli_release_iocbq(phba, curr_iocb);
17783         }
17784         lpfc_sli_release_iocbq(phba, iocbq);
17785 }
17786
17787 static void
17788 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17789                             struct lpfc_iocbq *rspiocb)
17790 {
17791         struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17792
17793         if (pcmd && pcmd->virt)
17794                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17795         kfree(pcmd);
17796         lpfc_sli_release_iocbq(phba, cmdiocb);
17797         lpfc_drain_txq(phba);
17798 }
17799
17800 static void
17801 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17802                               struct hbq_dmabuf *dmabuf)
17803 {
17804         struct fc_frame_header *fc_hdr;
17805         struct lpfc_hba *phba = vport->phba;
17806         struct lpfc_iocbq *iocbq = NULL;
17807         union  lpfc_wqe *wqe;
17808         struct lpfc_dmabuf *pcmd = NULL;
17809         uint32_t frame_len;
17810         int rc;
17811         unsigned long iflags;
17812
17813         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17814         frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17815
17816         /* Send the received frame back */
17817         iocbq = lpfc_sli_get_iocbq(phba);
17818         if (!iocbq) {
17819                 /* Queue cq event and wakeup worker thread to process it */
17820                 spin_lock_irqsave(&phba->hbalock, iflags);
17821                 list_add_tail(&dmabuf->cq_event.list,
17822                               &phba->sli4_hba.sp_queue_event);
17823                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17824                 spin_unlock_irqrestore(&phba->hbalock, iflags);
17825                 lpfc_worker_wake_up(phba);
17826                 return;
17827         }
17828
17829         /* Allocate buffer for command payload */
17830         pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17831         if (pcmd)
17832                 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17833                                             &pcmd->phys);
17834         if (!pcmd || !pcmd->virt)
17835                 goto exit;
17836
17837         INIT_LIST_HEAD(&pcmd->list);
17838
17839         /* copyin the payload */
17840         memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17841
17842         /* fill in BDE's for command */
17843         iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17844         iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17845         iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17846         iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17847
17848         iocbq->context2 = pcmd;
17849         iocbq->vport = vport;
17850         iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17851         iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17852
17853         /*
17854          * Setup rest of the iocb as though it were a WQE
17855          * Build the SEND_FRAME WQE
17856          */
17857         wqe = (union lpfc_wqe *)&iocbq->iocb;
17858
17859         wqe->send_frame.frame_len = frame_len;
17860         wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17861         wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17862         wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17863         wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17864         wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17865         wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17866
17867         iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17868         iocbq->iocb.ulpLe = 1;
17869         iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17870         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17871         if (rc == IOCB_ERROR)
17872                 goto exit;
17873
17874         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17875         return;
17876
17877 exit:
17878         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17879                         "2023 Unable to process MDS loopback frame\n");
17880         if (pcmd && pcmd->virt)
17881                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17882         kfree(pcmd);
17883         if (iocbq)
17884                 lpfc_sli_release_iocbq(phba, iocbq);
17885         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17886 }
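/*
 * Editor's note: a sketch of the SEND_FRAME header packing above, not
 * driver code. The FC header arrives big-endian from the wire; each of its
 * six 32-bit words is byte-swapped into CPU order before being written
 * into the WQE:
 *
 *	uint32_t *hw = (uint32_t *)fc_hdr;
 *
 *	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(hw[0]);
 *	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(hw[1]);
 *	// ... and so on through fc_hdr_wd5 = be32_to_cpu(hw[5]);
 */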
17887
17888 /**
17889  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17890  * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the FC sequence
17891  *
17892  * This function is called with no lock held. This function processes all
17893  * the received buffers and gives them to the upper layers when a received buffer
17894  * indicates that it is the final frame in the sequence. The interrupt
17895  * service routine processes received buffers at interrupt contexts.
17896  * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17897  * appropriate receive function when the final frame in a sequence is received.
17898  **/
17899 void
17900 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17901                                  struct hbq_dmabuf *dmabuf)
17902 {
17903         struct hbq_dmabuf *seq_dmabuf;
17904         struct fc_frame_header *fc_hdr;
17905         struct lpfc_vport *vport;
17906         uint32_t fcfi;
17907         uint32_t did;
17908
17909         /* Process each received buffer */
17910         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17911
17912         if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
17913             fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
17914                 vport = phba->pport;
17915                 /* Handle MDS Loopback frames */
17916                 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17917                 return;
17918         }
17919
17920         /* check to see if this is a valid type of frame */
17921         if (lpfc_fc_frame_check(phba, fc_hdr)) {
17922                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17923                 return;
17924         }
17925
17926         if ((bf_get(lpfc_cqe_code,
17927                     &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17928                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17929                               &dmabuf->cq_event.cqe.rcqe_cmpl);
17930         else
17931                 fcfi = bf_get(lpfc_rcqe_fcf_id,
17932                               &dmabuf->cq_event.cqe.rcqe_cmpl);
17933
17934         if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17935                 vport = phba->pport;
17936                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17937                                 "2023 MDS Loopback %d bytes\n",
17938                                 bf_get(lpfc_rcqe_length,
17939                                        &dmabuf->cq_event.cqe.rcqe_cmpl));
17940                 /* Handle MDS Loopback frames */
17941                 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17942                 return;
17943         }
17944
17945         /* d_id this frame is directed to */
17946         did = sli4_did_from_fc_hdr(fc_hdr);
17947
17948         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17949         if (!vport) {
17950                 /* throw out the frame */
17951                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17952                 return;
17953         }
17954
17955         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17956         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17957                 (did != Fabric_DID)) {
17958                 /*
17959                  * Throw out the frame if we are not pt2pt.
17960                  * The pt2pt protocol allows for discovery frames
17961                  * to be received without a registered VPI.
17962                  */
17963                 if (!(vport->fc_flag & FC_PT2PT) ||
17964                         (phba->link_state == LPFC_HBA_READY)) {
17965                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
17966                         return;
17967                 }
17968         }
17969
17970         /* Handle the basic abort sequence (BA_ABTS) event */
17971         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17972                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17973                 return;
17974         }
17975
17976         /* Link this frame */
17977         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17978         if (!seq_dmabuf) {
17979                 /* unable to add frame to vport - throw it out */
17980                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17981                 return;
17982         }
17983         /* If not last frame in sequence continue processing frames. */
17984         if (!lpfc_seq_complete(seq_dmabuf))
17985                 return;
17986
17987         /* Send the complete sequence to the upper layer protocol */
17988         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
17989 }
17990
17991 /**
17992  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17993  * @phba: pointer to lpfc hba data structure.
17994  *
17995  * This routine is invoked to post rpi header templates to the
17996  * HBA consistent with the SLI-4 interface spec.  This routine
17997  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17998  * SLI4_PAGE_SIZE / 64 rpi context headers.
17999  *
18000  * This routine does not require any locks.  Its usage is expected
18001  * to be driver load or reset recovery, when driver execution is
18002  * sequential.
18003  *
18004  * Return codes
18005  *      0 - successful
18006  *      -EIO - The mailbox failed to complete successfully.
18007  *      When this error occurs, the driver is not guaranteed
18008  *      to have any rpi regions posted to the device and
18009  *      must either attempt to repost the regions or take a
18010  *      fatal error.
18011  **/
18012 int
18013 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18014 {
18015         struct lpfc_rpi_hdr *rpi_page;
18016         uint32_t rc = 0;
18017         uint16_t lrpi = 0;
18018
18019         /* SLI4 ports that support extents do not require RPI headers. */
18020         if (!phba->sli4_hba.rpi_hdrs_in_use)
18021                 goto exit;
18022         if (phba->sli4_hba.extents_in_use)
18023                 return -EIO;
18024
18025         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18026                 /*
18027                  * Assign the rpi headers a physical rpi only if the driver
18028                  * has not initialized those resources.  A port reset only
18029                  * needs the headers posted.
18030                  */
18031                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18032                     LPFC_RPI_RSRC_RDY)
18033                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18034
18035                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18036                 if (rc != MBX_SUCCESS) {
18037                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18038                                         "2008 Error %d posting all rpi "
18039                                         "headers\n", rc);
18040                         rc = -EIO;
18041                         break;
18042                 }
18043         }
18044
18045  exit:
18046         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18047                LPFC_RPI_RSRC_RDY);
18048         return rc;
18049 }
18050
18051 /**
18052  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18053  * @phba: pointer to lpfc hba data structure.
18054  * @rpi_page:  pointer to the rpi memory region.
18055  *
18056  * This routine is invoked to post a single rpi header to the
18057  * HBA consistent with the SLI-4 interface spec.  This memory region
18058  * maps up to 64 rpi context regions.
18059  *
18060  * Return codes
18061  *      0 - successful
18062  *      -ENOMEM - No available memory
18063  *      -EIO - The mailbox failed to complete successfully.
18064  **/
18065 int
18066 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18067 {
18068         LPFC_MBOXQ_t *mboxq;
18069         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18070         uint32_t rc = 0;
18071         uint32_t shdr_status, shdr_add_status;
18072         union lpfc_sli4_cfg_shdr *shdr;
18073
18074         /* SLI4 ports that support extents do not require RPI headers. */
18075         if (!phba->sli4_hba.rpi_hdrs_in_use)
18076                 return rc;
18077         if (phba->sli4_hba.extents_in_use)
18078                 return -EIO;
18079
18080         /* The port is notified of the header region via a mailbox command. */
18081         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18082         if (!mboxq) {
18083                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18084                                 "2001 Unable to allocate memory for issuing "
18085                                 "SLI_CONFIG_SPECIAL mailbox command\n");
18086                 return -ENOMEM;
18087         }
18088
18089         /* Post all rpi memory regions to the port. */
18090         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18091         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18092                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18093                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18094                          sizeof(struct lpfc_sli4_cfg_mhdr),
18095                          LPFC_SLI4_MBX_EMBED);
18096
18097
18098         /* Post the physical rpi to the port for this rpi header. */
18099         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18100                rpi_page->start_rpi);
18101         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18102                hdr_tmpl, rpi_page->page_count);
18103
18104         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18105         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18106         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18107         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18108         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18109         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18110         if (rc != MBX_TIMEOUT)
18111                 mempool_free(mboxq, phba->mbox_mem_pool);
18112         if (shdr_status || shdr_add_status || rc) {
18113                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18114                                 "2514 POST_RPI_HDR mailbox failed with "
18115                                 "status x%x add_status x%x, mbx status x%x\n",
18116                                 shdr_status, shdr_add_status, rc);
18117                 rc = -ENXIO;
18118         } else {
18119                 /*
18120                  * The next_rpi stores the next logical modulo-64 rpi value used
18121                  * to post physical rpis in subsequent rpi postings.
18122                  */
18123                 spin_lock_irq(&phba->hbalock);
18124                 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18125                 spin_unlock_irq(&phba->hbalock);
18126         }
18127         return rc;
18128 }
18129
18130 /**
18131  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18132  * @phba: pointer to lpfc hba data structure.
18133  *
18134  * This routine is invoked to allocate the next available rpi from the
18135  * driver's rpi bitmask. If rpi resources run low and rpi headers are in
18136  * use, it also grows the pool by creating and posting another rpi header
18137  * page to the port.
18138  *
18139  * Returns
18140  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18141  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
18142  **/
18143 int
18144 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18145 {
18146         unsigned long rpi;
18147         uint16_t max_rpi, rpi_limit;
18148         uint16_t rpi_remaining, lrpi = 0;
18149         struct lpfc_rpi_hdr *rpi_hdr;
18150         unsigned long iflag;
18151
18152         /*
18153          * Fetch the next logical rpi.  Because this index is logical,
18154          * the driver starts at 0 each time.
18155          */
18156         spin_lock_irqsave(&phba->hbalock, iflag);
18157         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18158         rpi_limit = phba->sli4_hba.next_rpi;
18159
18160         rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18161         if (rpi >= rpi_limit)
18162                 rpi = LPFC_RPI_ALLOC_ERROR;
18163         else {
18164                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18165                 phba->sli4_hba.max_cfg_param.rpi_used++;
18166                 phba->sli4_hba.rpi_count++;
18167         }
18168         lpfc_printf_log(phba, KERN_INFO,
18169                         LOG_NODE | LOG_DISCOVERY,
18170                         "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18171                         (int) rpi, max_rpi, rpi_limit);
18172
18173         /*
18174          * Don't try to allocate more rpi header regions if the device limit
18175          * has been exhausted.
18176          */
18177         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18178             (phba->sli4_hba.rpi_count >= max_rpi)) {
18179                 spin_unlock_irqrestore(&phba->hbalock, iflag);
18180                 return rpi;
18181         }
18182
18183         /*
18184          * RPI header postings are not required for SLI4 ports capable of
18185          * extents.
18186          */
18187         if (!phba->sli4_hba.rpi_hdrs_in_use) {
18188                 spin_unlock_irqrestore(&phba->hbalock, iflag);
18189                 return rpi;
18190         }
18191
18192         /*
18193          * If the driver is running low on rpi resources, allocate another
18194          * page now.  Note that the next_rpi value is used because
18195          * it represents how many are actually in use whereas max_rpi notes
18196          * how many are supported max by the device.
18197          */
18198         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18199         spin_unlock_irqrestore(&phba->hbalock, iflag);
18200         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18201                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18202                 if (!rpi_hdr) {
18203                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18204                                         "2002 Error Could not grow rpi "
18205                                         "count\n");
18206                 } else {
18207                         lrpi = rpi_hdr->start_rpi;
18208                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18209                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18210                 }
18211         }
18212
18213         return rpi;
18214 }
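/*
 * Editor's note: an illustrative sketch, not driver code. rpi allocation
 * above is a plain bitmap allocator under phba->hbalock:
 * find_next_zero_bit() locates the lowest free logical rpi below the
 * posted limit and set_bit() claims it; __lpfc_sli4_free_rpi() below
 * reverses this with test_and_clear_bit():
 *
 *	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
 *	if (rpi < rpi_limit)
 *		set_bit(rpi, phba->sli4_hba.rpi_bmask);
 */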
18215
18216 /**
18217  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18218  * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
18219  *
18220  * This routine is invoked to release an rpi to the pool of
18221  * available rpis maintained by the driver. The caller must hold
 * phba->hbalock.
18222  **/
18223 static void
18224 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18225 {
18226         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18227                 phba->sli4_hba.rpi_count--;
18228                 phba->sli4_hba.max_cfg_param.rpi_used--;
18229         } else {
18230                 lpfc_printf_log(phba, KERN_INFO,
18231                                 LOG_NODE | LOG_DISCOVERY,
18232                                 "2016 rpi %x not inuse\n",
18233                                 rpi);
18234         }
18235 }
18236
18237 /**
18238  * lpfc_sli4_free_rpi - Release an rpi for reuse.
18239  * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
18240  *
18241  * This routine is invoked to release an rpi to the pool of
18242  * available rpis maintained by the driver.
18243  **/
18244 void
18245 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18246 {
18247         spin_lock_irq(&phba->hbalock);
18248         __lpfc_sli4_free_rpi(phba, rpi);
18249         spin_unlock_irq(&phba->hbalock);
18250 }
18251
18252 /**
18253  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18254  * @phba: pointer to lpfc hba data structure.
18255  *
18256  * This routine is invoked to free the memory that tracks rpi
18257  * allocation: the rpi bitmask and the rpi id array.
18258  **/
18259 void
18260 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18261 {
18262         kfree(phba->sli4_hba.rpi_bmask);
18263         kfree(phba->sli4_hba.rpi_ids);
18264         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18265 }
18266
18267 /**
18268  * lpfc_sli4_resume_rpi - Resume the rpi associated with a remote node
18269  * @ndlp: pointer to the node whose rpi is being resumed.
 * @cmpl: optional mailbox completion handler.
 * @arg: completion handler context, stored in the mailbox ctx_buf.
18270  *
18271  * This routine is invoked to issue a RESUME_RPI mailbox command to
18272  * resume the rpi associated with @ndlp.
18273  **/
18274 int
18275 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18276         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18277 {
18278         LPFC_MBOXQ_t *mboxq;
18279         struct lpfc_hba *phba = ndlp->phba;
18280         int rc;
18281
18282         /* The port is notified of the header region via a mailbox command. */
18283         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18284         if (!mboxq)
18285                 return -ENOMEM;
18286
18287         /* Post all rpi memory regions to the port. */
18288         lpfc_resume_rpi(mboxq, ndlp);
18289         if (cmpl) {
18290                 mboxq->mbox_cmpl = cmpl;
18291                 mboxq->ctx_buf = arg;
18292                 mboxq->ctx_ndlp = ndlp;
18293         } else
18294                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18295         mboxq->vport = ndlp->vport;
18296         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18297         if (rc == MBX_NOT_FINISHED) {
18298                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18299                                 "2010 Resume RPI Mailbox failed "
18300                                 "status %d, mbxStatus x%x\n", rc,
18301                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18302                 mempool_free(mboxq, phba->mbox_mem_pool);
18303                 return -EIO;
18304         }
18305         return 0;
18306 }
18307
18308 /**
18309  * lpfc_sli4_init_vpi - Initialize a vpi with the port
18310  * @vport: Pointer to the vport for which the vpi is being initialized
18311  *
18312  * This routine is invoked to activate a vpi with the port.
18313  *
18314  * Returns:
18315  *    0 on success
18316  *    negative errno value otherwise
18317  **/
18318 int
18319 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18320 {
18321         LPFC_MBOXQ_t *mboxq;
18322         int rc = 0;
18323         int retval = MBX_SUCCESS;
18324         uint32_t mbox_tmo;
18325         struct lpfc_hba *phba = vport->phba;
18326         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18327         if (!mboxq)
18328                 return -ENOMEM;
18329         lpfc_init_vpi(phba, mboxq, vport->vpi);
18330         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18331         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18332         if (rc != MBX_SUCCESS) {
18333                 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18334                                 "2022 INIT VPI Mailbox failed "
18335                                 "status %d, mbxStatus x%x\n", rc,
18336                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18337                 retval = -EIO;
18338         }
18339         if (rc != MBX_TIMEOUT)
18340                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18341
18342         return retval;
18343 }
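
/*
 * INIT_VPI is issued synchronously (lpfc_sli_issue_mbox_wait), so a
 * hypothetical caller only needs process context and a return check:
 */
static int example_activate_vpi(struct lpfc_vport *vport)
{
        int rc;

        rc = lpfc_sli4_init_vpi(vport);   /* may sleep on the mailbox */
        if (rc)
                return rc;                /* -ENOMEM or -EIO */
        /* the vpi is now initialized with the port */
        return 0;
}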
18344
18345 /**
18346  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18347  * @phba: pointer to lpfc hba data structure.
18348  * @mboxq: Pointer to mailbox object.
18349  *
18350  * This routine is the completion handler for the ADD_FCF_RECORD
18351  * mailbox command. It checks the IOCTL status in the mailbox
18352  * subheader, logs any failure, and frees the nonembedded mailbox.
18353  **/
18354 static void
18355 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18356 {
18357         void *virt_addr;
18358         union lpfc_sli4_cfg_shdr *shdr;
18359         uint32_t shdr_status, shdr_add_status;
18360
18361         virt_addr = mboxq->sge_array->addr[0];
18362         /* The IOCTL status is embedded in the mailbox subheader. */
18363         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18364         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18365         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18366
18367         if ((shdr_status || shdr_add_status) &&
18368                 (shdr_status != STATUS_FCF_IN_USE))
18369                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18370                         "2558 ADD_FCF_RECORD mailbox failed with "
18371                         "status x%x add_status x%x\n",
18372                         shdr_status, shdr_add_status);
18373
18374         lpfc_sli4_mbox_cmd_free(phba, mboxq);
18375 }
18376
18377 /**
18378  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18379  * @phba: pointer to lpfc hba data structure.
18380  * @fcf_record:  pointer to the initialized fcf record to add.
18381  *
18382  * This routine is invoked to manually add a single FCF record. The caller
18383  * must pass a completely initialized FCF_Record.  This routine takes
18384  * care of the nonembedded mailbox operations.
18385  **/
18386 int
18387 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18388 {
18389         int rc = 0;
18390         LPFC_MBOXQ_t *mboxq;
18391         uint8_t *bytep;
18392         void *virt_addr;
18393         struct lpfc_mbx_sge sge;
18394         uint32_t alloc_len, req_len;
18395         uint32_t fcfindex;
18396
18397         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18398         if (!mboxq) {
18399                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18400                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18401                 return -ENOMEM;
18402         }
18403
18404         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18405                   sizeof(uint32_t);
18406
18407         /* Allocate DMA memory and set up the non-embedded mailbox command */
18408         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18409                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18410                                      req_len, LPFC_SLI4_MBX_NEMBED);
18411         if (alloc_len < req_len) {
18412                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18413                         "2523 Allocated DMA memory size (x%x) is "
18414                         "less than the requested DMA memory "
18415                         "size (x%x)\n", alloc_len, req_len);
18416                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18417                 return -ENOMEM;
18418         }
18419
18420         /*
18421          * Get the first SGE entry from the non-embedded DMA memory.  This
18422          * routine only uses a single SGE.
18423          */
18424         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18425         virt_addr = mboxq->sge_array->addr[0];
18426         /*
18427          * Configure the FCF record for FCFI 0.  This is the driver's
18428          * hardcoded default and gets used in non-FIP mode.
18429          */
18430         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18431         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18432         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18433
18434         /*
18435          * Copy the fcf_index and the FCF Record Data. The data starts after
18436          * the FCoE header plus word10. The data copy needs to be endian
18437          * correct.
18438          */
18439         bytep += sizeof(uint32_t);
18440         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18441         mboxq->vport = phba->pport;
18442         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18443         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18444         if (rc == MBX_NOT_FINISHED) {
18445                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18446                         "2515 ADD_FCF_RECORD mailbox failed with "
18447                         "status 0x%x\n", rc);
18448                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18449                 rc = -EIO;
18450         } else
18451                 rc = 0;
18452
18453         return rc;
18454 }
18455
18456 /**
18457  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18458  * @phba: pointer to lpfc hba data structure.
18459  * @fcf_record:  pointer to the fcf record to write the default data.
18460  * @fcf_index: FCF table entry index.
18461  *
18462  * This routine is invoked to build the driver's default FCF record.  The
18463  * values used are hardcoded.  This routine handles memory initialization.
18464  *
18465  **/
18466 void
18467 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18468                                 struct fcf_record *fcf_record,
18469                                 uint16_t fcf_index)
18470 {
18471         memset(fcf_record, 0, sizeof(struct fcf_record));
18472         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18473         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18474         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18475         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18476         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18477         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18478         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18479         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18480         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18481         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18482         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18483         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18484         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18485         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18486         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18487         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18488                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18489         /* Set the VLAN bit map */
18490         if (phba->valid_vlan) {
18491                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18492                         = 1 << (phba->vlan_id % 8);
18493         }
18494 }
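
/*
 * Sketch of the intended pairing of the two routines above (hypothetical
 * caller; assumes process context since lpfc_sli4_add_fcf_record() uses
 * GFP_KERNEL).  The record can be freed as soon as the call returns
 * because add_fcf_record copies it into the nonembedded DMA buffer
 * before issuing the mailbox command.
 */
static int example_add_default_fcf(struct lpfc_hba *phba, uint16_t fcf_index)
{
        struct fcf_record *fcf_record;
        int rc;

        fcf_record = kzalloc(sizeof(*fcf_record), GFP_KERNEL);
        if (!fcf_record)
                return -ENOMEM;
        lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, fcf_index);
        rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
        kfree(fcf_record);
        return rc;
}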
18495
18496 /**
18497  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18498  * @phba: pointer to lpfc hba data structure.
18499  * @fcf_index: FCF table entry offset.
18500  *
18501  * This routine is invoked to scan the entire FCF table by reading FCF
18502  * records and processing them one at a time starting from the @fcf_index
18503  * for initial FCF discovery or fast FCF failover rediscovery.
18504  *
18505  * Return 0 if the mailbox command is submitted successfully, non-zero
18506  * otherwise.
18507  **/
18508 int
18509 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18510 {
18511         int rc = 0, error;
18512         LPFC_MBOXQ_t *mboxq;
18513
18514         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18515         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18516         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18517         if (!mboxq) {
18518                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18519                                 "2000 Failed to allocate mbox for "
18520                                 "READ_FCF cmd\n");
18521                 error = -ENOMEM;
18522                 goto fail_fcf_scan;
18523         }
18524         /* Construct the read FCF record mailbox command */
18525         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18526         if (rc) {
18527                 error = -EINVAL;
18528                 goto fail_fcf_scan;
18529         }
18530         /* Issue the mailbox command asynchronously */
18531         mboxq->vport = phba->pport;
18532         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18533
18534         spin_lock_irq(&phba->hbalock);
18535         phba->hba_flag |= FCF_TS_INPROG;
18536         spin_unlock_irq(&phba->hbalock);
18537
18538         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18539         if (rc == MBX_NOT_FINISHED)
18540                 error = -EIO;
18541         else {
18542                 /* Reset eligible FCF count for new scan */
18543                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18544                         phba->fcf.eligible_fcf_cnt = 0;
18545                 error = 0;
18546         }
18547 fail_fcf_scan:
18548         if (error) {
18549                 if (mboxq)
18550                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
18551                 /* FCF scan failed, clear FCF_TS_INPROG flag */
18552                 spin_lock_irq(&phba->hbalock);
18553                 phba->hba_flag &= ~FCF_TS_INPROG;
18554                 spin_unlock_irq(&phba->hbalock);
18555         }
18556         return error;
18557 }
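
/*
 * Hypothetical sketch: a full-table scan is started by passing
 * LPFC_FCOE_FCF_GET_FIRST; the walk then continues from the completion
 * handler.  On success FCF_TS_INPROG remains set and, for a GET_FIRST
 * scan, eligible_fcf_cnt has been reset for the new scan.
 */
static int example_start_fcf_scan(struct lpfc_hba *phba)
{
        return lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
}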
18558
18559 /**
18560  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18561  * @phba: pointer to lpfc hba data structure.
18562  * @fcf_index: FCF table entry offset.
18563  *
18564  * This routine is invoked to read an FCF record indicated by @fcf_index
18565  * and to use it for FLOGI roundrobin FCF failover.
18566  *
18567  * Return 0 if the mailbox command is submitted successfully, non-zero
18568  * otherwise.
18569  **/
18570 int
18571 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18572 {
18573         int rc = 0, error;
18574         LPFC_MBOXQ_t *mboxq;
18575
18576         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18577         if (!mboxq) {
18578                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18579                                 "2763 Failed to allocate mbox for "
18580                                 "READ_FCF cmd\n");
18581                 error = -ENOMEM;
18582                 goto fail_fcf_read;
18583         }
18584         /* Construct the read FCF record mailbox command */
18585         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18586         if (rc) {
18587                 error = -EINVAL;
18588                 goto fail_fcf_read;
18589         }
18590         /* Issue the mailbox command asynchronously */
18591         mboxq->vport = phba->pport;
18592         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18593         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18594         if (rc == MBX_NOT_FINISHED)
18595                 error = -EIO;
18596         else
18597                 error = 0;
18598
18599 fail_fcf_read:
18600         if (error && mboxq)
18601                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18602         return error;
18603 }
18604
18605 /**
18606  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18607  * @phba: pointer to lpfc hba data structure.
18608  * @fcf_index: FCF table entry offset.
18609  *
18610  * This routine is invoked to read an FCF record indicated by @fcf_index to
18611  * determine whether it's eligible for FLOGI roundrobin failover list.
18612  *
18613  * Return 0 if the mailbox command is submitted successfully, non-zero
18614  * otherwise.
18615  **/
18616 int
18617 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18618 {
18619         int rc = 0, error;
18620         LPFC_MBOXQ_t *mboxq;
18621
18622         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18623         if (!mboxq) {
18624                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18625                                 "2758 Failed to allocate mbox for "
18626                                 "READ_FCF cmd\n");
18627                 error = -ENOMEM;
18628                 goto fail_fcf_read;
18629         }
18630         /* Construct the read FCF record mailbox command */
18631         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18632         if (rc) {
18633                 error = -EINVAL;
18634                 goto fail_fcf_read;
18635         }
18636         /* Issue the mailbox command asynchronously */
18637         mboxq->vport = phba->pport;
18638         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18639         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18640         if (rc == MBX_NOT_FINISHED)
18641                 error = -EIO;
18642         else
18643                 error = 0;
18644
18645 fail_fcf_read:
18646         if (error && mboxq)
18647                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18648         return error;
18649 }
18650
18651 /**
18652  * lpfc_check_next_fcf_pri_level - Move to the next fcf priority level
18653  * @phba: pointer to the lpfc_hba struct for this port.
18654  *
18655  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18656  * routine when the rr_bmask is empty. The FCF indices are put into the
18657  * rr_bmask based on their priority level, starting from the highest
18658  * priority down to the lowest. The most likely FCF candidate will be in
18659  * the highest priority group. When this routine is called, it searches
18660  * the fcf_pri list for the next lowest priority group and repopulates
18661  * the rr_bmask with only those fcf indexes.
18662  * Returns: 1 = success, 0 = failure.
18663  **/
18664 static int
18665 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18666 {
18667         uint16_t next_fcf_pri;
18668         uint16_t last_index;
18669         struct lpfc_fcf_pri *fcf_pri;
18670         int rc;
18671         int ret = 0;
18672
18673         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18674                         LPFC_SLI4_FCF_TBL_INDX_MAX);
18675         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18676                         "3060 Last IDX %d\n", last_index);
18677
18678         /* Verify the priority list has 2 or more entries */
18679         spin_lock_irq(&phba->hbalock);
18680         if (list_empty(&phba->fcf.fcf_pri_list) ||
18681             list_is_singular(&phba->fcf.fcf_pri_list)) {
18682                 spin_unlock_irq(&phba->hbalock);
18683                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18684                         "3061 Last IDX %d\n", last_index);
18685                 return 0; /* Empty rr list */
18686         }
18687         spin_unlock_irq(&phba->hbalock);
18688
18689         next_fcf_pri = 0;
18690         /*
18691          * Clear the rr_bmask and set all of the bits that are at this
18692          * priority.
18693          */
18694         memset(phba->fcf.fcf_rr_bmask, 0,
18695                         sizeof(*phba->fcf.fcf_rr_bmask));
18696         spin_lock_irq(&phba->hbalock);
18697         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18698                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18699                         continue;
18700                 /*
18701                  * The first priority that has not failed FLOGI
18702                  * will be the highest.
18703                  */
18704                 if (!next_fcf_pri)
18705                         next_fcf_pri = fcf_pri->fcf_rec.priority;
18706                 spin_unlock_irq(&phba->hbalock);
18707                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18708                         rc = lpfc_sli4_fcf_rr_index_set(phba,
18709                                                 fcf_pri->fcf_rec.fcf_index);
18710                         if (rc)
18711                                 return 0;
18712                 }
18713                 spin_lock_irq(&phba->hbalock);
18714         }
18715         /*
18716          * If next_fcf_pri was not set above and the list is not empty,
18717          * then FLOGI has failed on all of them. Reset the FLOGI-failed
18718          * flags and start over at the beginning.
18719          */
18720         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18721                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18722                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18723                         /*
18724                          * The first priority that has not failed FLOGI
18725                          * will be the highest.
18726                          */
18727                         if (!next_fcf_pri)
18728                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
18729                         spin_unlock_irq(&phba->hbalock);
18730                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18731                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
18732                                                 fcf_pri->fcf_rec.fcf_index);
18733                                 if (rc)
18734                                         return 0;
18735                         }
18736                         spin_lock_irq(&phba->hbalock);
18737                 }
18738         } else
18739                 ret = 1;
18740         spin_unlock_irq(&phba->hbalock);
18741
18742         return ret;
18743 }
18744 /**
18745  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18746  * @phba: pointer to lpfc hba data structure.
18747  *
18748  * This routine is to get the next eligible FCF record index in a round
18749  * robin fashion. If the next eligible FCF record index equals the
18750  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18751  * shall be returned, otherwise, the next eligible FCF record's index
18752  * shall be returned.
18753  **/
18754 uint16_t
18755 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18756 {
18757         uint16_t next_fcf_index;
18758
18759 initial_priority:
18760         /* Search start from next bit of currently registered FCF index */
18761         next_fcf_index = phba->fcf.current_rec.fcf_indx;
18762
18763 next_priority:
18764         /* Determine the next fcf index to check */
18765         next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18766         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18767                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
18768                                        next_fcf_index);
18769
18770         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18771         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18772                 /*
18773                  * If we have wrapped then we need to clear the bits that
18774                  * have been tested so that we can detect when we should
18775                  * change the priority level.
18776                  */
18777                 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18778                                                LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18779         }
18780
18781
18782         /* Check roundrobin failover list empty condition */
18783         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18784                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18785                 /*
18786                  * If the next fcf index is not found, check if there are
18787                  * lower priority level fcf's in the fcf_priority list.
18788                  * Set up the rr_bmask with all of the available fcf bits
18789                  * at that level and continue the selection process.
18790                  */
18791                 if (lpfc_check_next_fcf_pri_level(phba))
18792                         goto initial_priority;
18793                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18794                                 "2844 No roundrobin failover FCF available\n");
18795
18796                 return LPFC_FCOE_FCF_NEXT_NONE;
18797         }
18798
18799         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18800                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18801                 LPFC_FCF_FLOGI_FAILED) {
18802                 if (list_is_singular(&phba->fcf.fcf_pri_list))
18803                         return LPFC_FCOE_FCF_NEXT_NONE;
18804
18805                 goto next_priority;
18806         }
18807
18808         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18809                         "2845 Get next roundrobin failover FCF (x%x)\n",
18810                         next_fcf_index);
18811
18812         return next_fcf_index;
18813 }
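
/*
 * Sketch of one roundrobin failover step using the index returned above
 * (hypothetical helper; the real FLOGI retry logic lives in discovery):
 */
static int example_failover_to_next_fcf(struct lpfc_hba *phba)
{
        uint16_t next_index = lpfc_sli4_fcf_rr_next_index_get(phba);

        if (next_index == LPFC_FCOE_FCF_NEXT_NONE)
                return -ENOENT;         /* failover list exhausted */
        /* read the record; FCF registration continues in its completion */
        return lpfc_sli4_fcf_rr_read_fcf_rec(phba, next_index);
}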
18814
18815 /**
18816  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18817  * @phba: pointer to lpfc hba data structure.
18818  * @fcf_index: index into the FCF record table.
18819  *
18820  * This routine sets the FCF record index into the eligible bmask for
18821  * roundrobin failover search. It checks to make sure that the index
18822  * does not go beyond the range of the driver allocated bmask dimension
18823  * before setting the bit.
18824  *
18825  * Returns 0 if the index bit is successfully set, otherwise -EINVAL.
18826  **/
18827 int
18828 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18829 {
18830         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18831                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18832                                 "2610 FCF (x%x) reached driver's book "
18833                                 "keeping dimension:x%x\n",
18834                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18835                 return -EINVAL;
18836         }
18837         /* Set the eligible FCF record index bmask */
18838         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18839
18840         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18841                         "2790 Set FCF (x%x) to roundrobin FCF failover "
18842                         "bmask\n", fcf_index);
18843
18844         return 0;
18845 }
18846
18847 /**
18848  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18849  * @phba: pointer to lpfc hba data structure.
18850  * @fcf_index: index into the FCF record table.
18851  * This routine clears the FCF record index from the eligible bmask for
18852  * roundrobin failover search. It checks to make sure that the index
18853  * does not go beyond the range of the driver allocated bmask dimension
18854  * before clearing the bit.
18855  **/
18856 void
18857 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18858 {
18859         struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18860         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18861                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18862                                 "2762 FCF (x%x) reached driver's book "
18863                                 "keeping dimension:x%x\n",
18864                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18865                 return;
18866         }
18867         /* Clear the eligible FCF record index bmask */
18868         spin_lock_irq(&phba->hbalock);
18869         list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18870                                  list) {
18871                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18872                         list_del_init(&fcf_pri->list);
18873                         break;
18874                 }
18875         }
18876         spin_unlock_irq(&phba->hbalock);
18877         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18878
18879         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18880                         "2791 Clear FCF (x%x) from roundrobin failover "
18881                         "bmask\n", fcf_index);
18882 }
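
/*
 * The set/clear pair above maintains FCF eligibility for roundrobin
 * failover.  A hypothetical sketch of marking one index eligible and
 * later retiring it (the clear also drops the entry from fcf_pri_list):
 */
static void example_toggle_fcf_eligibility(struct lpfc_hba *phba,
                                           uint16_t fcf_index)
{
        if (lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
                return;         /* index beyond the bmask dimension */
        /* ... FCF participates in roundrobin selection here ... */
        lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
}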
18883
18884 /**
18885  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18886  * @phba: pointer to lpfc hba data structure.
18887  * @mbox: pointer to the rediscover FCF table mailbox object.
18888  * This routine is the completion routine for the rediscover FCF table mailbox
18889  * command. If the mailbox command returned failure, it will try to stop the
18890  * FCF rediscover wait timer.
18891  **/
18892 static void
18893 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18894 {
18895         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18896         uint32_t shdr_status, shdr_add_status;
18897
18898         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18899
18900         shdr_status = bf_get(lpfc_mbox_hdr_status,
18901                              &redisc_fcf->header.cfg_shdr.response);
18902         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18903                              &redisc_fcf->header.cfg_shdr.response);
18904         if (shdr_status || shdr_add_status) {
18905                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18906                                 "2746 Requesting for FCF rediscovery failed "
18907                                 "status x%x add_status x%x\n",
18908                                 shdr_status, shdr_add_status);
18909                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
18910                         spin_lock_irq(&phba->hbalock);
18911                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
18912                         spin_unlock_irq(&phba->hbalock);
18913                         /*
18914                          * CVL event triggered FCF rediscover request failed;
18915                          * last resort is to retry the registered FCF entry.
18916                          */
18917                         lpfc_retry_pport_discovery(phba);
18918                 } else {
18919                         spin_lock_irq(&phba->hbalock);
18920                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
18921                         spin_unlock_irq(&phba->hbalock);
18922                         /*
18923                          * failed; the last resort is to fail over as
18924                          * a link down to FCF registration.
18925                          * to FCF registration.
18926                          */
18927                         lpfc_sli4_fcf_dead_failthrough(phba);
18928                 }
18929         } else {
18930                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18931                                 "2775 Start FCF rediscover quiescent timer\n");
18932                 /*
18933                  * Start the FCF rediscovery wait timer for the pending
18934                  * FCF before rescanning the FCF record table.
18935                  */
18936                 lpfc_fcf_redisc_wait_start_timer(phba);
18937         }
18938
18939         mempool_free(mbox, phba->mbox_mem_pool);
18940 }
18941
18942 /**
18943  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
18944  * @phba: pointer to lpfc hba data structure.
18945  *
18946  * This routine is invoked to request rediscovery of the entire FCF table
18947  * by the port.
18948  **/
18949 int
18950 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18951 {
18952         LPFC_MBOXQ_t *mbox;
18953         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18954         int rc, length;
18955
18956         /* Cancel retry delay timers to all vports before FCF rediscover */
18957         lpfc_cancel_all_vport_retry_delay_timer(phba);
18958
18959         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18960         if (!mbox) {
18961                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18962                                 "2745 Failed to allocate mbox for "
18963                                 "requesting FCF rediscover.\n");
18964                 return -ENOMEM;
18965         }
18966
18967         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18968                   sizeof(struct lpfc_sli4_cfg_mhdr));
18969         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18970                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18971                          length, LPFC_SLI4_MBX_EMBED);
18972
18973         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18974         /* Set count to 0 for invalidating the entire FCF database */
18975         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18976
18977         /* Issue the mailbox command asynchronously */
18978         mbox->vport = phba->pport;
18979         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18980         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18981
18982         if (rc == MBX_NOT_FINISHED) {
18983                 mempool_free(mbox, phba->mbox_mem_pool);
18984                 return -EIO;
18985         }
18986         return 0;
18987 }
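
/*
 * Hypothetical sketch of requesting rediscovery; on submission failure a
 * caller could fall back to the dead-FCF failthrough defined below:
 */
static void example_request_fcf_rediscover(struct lpfc_hba *phba)
{
        if (lpfc_sli4_redisc_fcf_table(phba))
                lpfc_sli4_fcf_dead_failthrough(phba);
}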
18988
18989 /**
18990  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18991  * @phba: pointer to lpfc hba data structure.
18992  *
18993  * This function is the failover routine as a last resort to the FCF DEAD
18994  * event when driver failed to perform fast FCF failover.
18995  **/
18996 void
18997 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18998 {
18999         uint32_t link_state;
19000
19001         /*
19002          * Last resort as FCF DEAD event failover will treat this as
19003          * a link down, but save the link state because we don't want
19004          * it to be changed to Link Down unless it is already down.
19005          */
19006         link_state = phba->link_state;
19007         lpfc_linkdown(phba);
19008         phba->link_state = link_state;
19009
19010         /* Unregister FCF if no devices connected to it */
19011         lpfc_unregister_unused_fcf(phba);
19012 }
19013
19014 /**
19015  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19016  * @phba: pointer to lpfc hba data structure.
19017  * @rgn23_data: pointer to configure region 23 data.
19018  *
19019  * This function gets SLI3 port configuration region 23 data through the
19020  * memory dump mailbox command. When it successfully retrieves data, the
19021  * size of the data is returned; otherwise, 0 is returned.
19022  **/
19023 static uint32_t
19024 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19025 {
19026         LPFC_MBOXQ_t *pmb = NULL;
19027         MAILBOX_t *mb;
19028         uint32_t offset = 0;
19029         int rc;
19030
19031         if (!rgn23_data)
19032                 return 0;
19033
19034         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19035         if (!pmb) {
19036                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19037                                 "2600 failed to allocate mailbox memory\n");
19038                 return 0;
19039         }
19040         mb = &pmb->u.mb;
19041
19042         do {
19043                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19044                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19045
19046                 if (rc != MBX_SUCCESS) {
19047                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19048                                         "2601 failed to read config "
19049                                         "region 23, rc 0x%x Status 0x%x\n",
19050                                         rc, mb->mbxStatus);
19051                         mb->un.varDmp.word_cnt = 0;
19052                 }
19053                 /*
19054                  * dump mem may return a zero when finished or we got a
19055                  * mailbox error, either way we are done.
19056                  */
19057                 if (mb->un.varDmp.word_cnt == 0)
19058                         break;
19059                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19060                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19061
19062                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19063                                        rgn23_data + offset,
19064                                        mb->un.varDmp.word_cnt);
19065                 offset += mb->un.varDmp.word_cnt;
19066         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19067
19068         mempool_free(pmb, phba->mbox_mem_pool);
19069         return offset;
19070 }
19071
19072 /**
19073  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19074  * @phba: pointer to lpfc hba data structure.
19075  * @rgn23_data: pointer to configure region 23 data.
19076  *
19077  * This function gets SLI4 port configuration region 23 data through the
19078  * memory dump mailbox command. When it successfully retrieves data, the
19079  * size of the data is returned; otherwise, 0 is returned.
19080  **/
19081 static uint32_t
19082 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19083 {
19084         LPFC_MBOXQ_t *mboxq = NULL;
19085         struct lpfc_dmabuf *mp = NULL;
19086         struct lpfc_mqe *mqe;
19087         uint32_t data_length = 0;
19088         int rc;
19089
19090         if (!rgn23_data)
19091                 return 0;
19092
19093         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19094         if (!mboxq) {
19095                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19096                                 "3105 failed to allocate mailbox memory\n");
19097                 return 0;
19098         }
19099
19100         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19101                 goto out;
19102         mqe = &mboxq->u.mqe;
19103         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19104         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19105         if (rc)
19106                 goto out;
19107         data_length = mqe->un.mb_words[5];
19108         if (data_length == 0)
19109                 goto out;
19110         if (data_length > DMP_RGN23_SIZE) {
19111                 data_length = 0;
19112                 goto out;
19113         }
19114         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19115 out:
19116         mempool_free(mboxq, phba->mbox_mem_pool);
19117         if (mp) {
19118                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19119                 kfree(mp);
19120         }
19121         return data_length;
19122 }
19123
19124 /**
19125  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19126  * @phba: pointer to lpfc hba data structure.
19127  *
19128  * This function reads region 23 and parses the TLV for port status to
19129  * decide if the user disabled the port. If the TLV indicates the
19130  * port is disabled, the hba_flag is set accordingly.
19131  **/
19132 void
19133 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19134 {
19135         uint8_t *rgn23_data = NULL;
19136         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19137         uint32_t offset = 0;
19138
19139         /* Get adapter Region 23 data */
19140         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19141         if (!rgn23_data)
19142                 goto out;
19143
19144         if (phba->sli_rev < LPFC_SLI_REV4)
19145                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19146         else {
19147                 if_type = bf_get(lpfc_sli_intf_if_type,
19148                                  &phba->sli4_hba.sli_intf);
19149                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19150                         goto out;
19151                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19152         }
19153
19154         if (!data_size)
19155                 goto out;
19156
19157         /* Check the region signature first */
19158         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19159                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19160                         "2619 Config region 23 has bad signature\n");
19161                 goto out;
19162         }
19163         offset += 4;
19164
19165         /* Check the data structure version */
19166         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19167                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19168                         "2620 Config region 23 has bad version\n");
19169                 goto out;
19170         }
19171         offset += 4;
19172
19173         /* Parse TLV entries in the region */
19174         while (offset < data_size) {
19175                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19176                         break;
19177                 /*
19178                  * If the TLV is not driver specific TLV or driver id is
19179                  * not linux driver id, skip the record.
19180                  */
19181                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19182                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19183                     (rgn23_data[offset + 3] != 0)) {
19184                         offset += rgn23_data[offset + 1] * 4 + 4;
19185                         continue;
19186                 }
19187
19188                 /* Driver found a driver specific TLV in the config region */
19189                 sub_tlv_len = rgn23_data[offset + 1] * 4;
19190                 offset += 4;
19191                 tlv_offset = 0;
19192
19193                 /*
19194                  * Search for configured port state sub-TLV.
19195                  */
19196                 while ((offset < data_size) &&
19197                         (tlv_offset < sub_tlv_len)) {
19198                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19199                                 offset += 4;
19200                                 tlv_offset += 4;
19201                                 break;
19202                         }
19203                         if (rgn23_data[offset] != PORT_STE_TYPE) {
19204                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19205                                 offset += rgn23_data[offset + 1] * 4 + 4;
19206                                 continue;
19207                         }
19208
19209                         /* This HBA contains PORT_STE configured */
19210                         if (!rgn23_data[offset + 2])
19211                                 phba->hba_flag |= LINK_DISABLED;
19212
19213                         goto out;
19214                 }
19215         }
19216
19217 out:
19218         kfree(rgn23_data);
19219         return;
19220 }
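
/*
 * The region 23 walk above assumes this TLV record layout (inferred from
 * the parsing code): byte 0 = type, byte 1 = length in 32-bit words, a
 * 4-byte header, then the payload.  A minimal "skip one record" helper:
 */
static uint32_t example_rgn23_next_rec(const uint8_t *rgn23_data,
                                       uint32_t offset)
{
        /* advance past the 4-byte header plus the word-counted payload */
        return offset + rgn23_data[offset + 1] * 4 + 4;
}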
19221
19222 /**
19223  * lpfc_wr_object - write an object to the firmware
19224  * @phba: HBA structure that indicates port to create a queue on.
19225  * @dmabuf_list: list of dmabufs to write to the port.
19226  * @size: the total byte value of the objects to write to the port.
19227  * @offset: the current offset to be used to start the transfer.
19228  *
19229  * This routine will create a wr_object mailbox command to send to the port.
19230  * The mailbox command will be constructed using the dma buffers described in
19231  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19232  * BDEs as the embedded mailbox can support. The @offset variable will be
19233  * used to indicate the starting offset of the transfer and will also return
19234  * the offset after the write object mailbox has completed. @size is used to
19235  * determine the end of the object and whether the eof bit should be set.
19236  *
19237  * Return 0 if successful; @offset will contain the new offset to use
19238  * for the next write.
19239  * Return negative value for error cases.
19240  **/
19241 int
19242 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19243                uint32_t size, uint32_t *offset)
19244 {
19245         struct lpfc_mbx_wr_object *wr_object;
19246         LPFC_MBOXQ_t *mbox;
19247         int rc = 0, i = 0;
19248         uint32_t shdr_status, shdr_add_status, shdr_change_status;
19249         uint32_t mbox_tmo;
19250         struct lpfc_dmabuf *dmabuf;
19251         uint32_t written = 0;
19252         bool check_change_status = false;
19253
19254         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19255         if (!mbox)
19256                 return -ENOMEM;
19257
19258         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19259                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
19260                         sizeof(struct lpfc_mbx_wr_object) -
19261                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19262
19263         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19264         wr_object->u.request.write_offset = *offset;
19265         sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19266         wr_object->u.request.object_name[0] =
19267                 cpu_to_le32(wr_object->u.request.object_name[0]);
19268         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19269         list_for_each_entry(dmabuf, dmabuf_list, list) {
19270                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19271                         break;
19272                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19273                 wr_object->u.request.bde[i].addrHigh =
19274                         putPaddrHigh(dmabuf->phys);
19275                 if (written + SLI4_PAGE_SIZE >= size) {
19276                         wr_object->u.request.bde[i].tus.f.bdeSize =
19277                                 (size - written);
19278                         written += (size - written);
19279                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19280                         bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19281                         check_change_status = true;
19282                 } else {
19283                         wr_object->u.request.bde[i].tus.f.bdeSize =
19284                                 SLI4_PAGE_SIZE;
19285                         written += SLI4_PAGE_SIZE;
19286                 }
19287                 i++;
19288         }
19289         wr_object->u.request.bde_count = i;
19290         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19291         if (!phba->sli4_hba.intr_enable)
19292                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19293         else {
19294                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19295                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19296         }
19297         /* The IOCTL status is embedded in the mailbox subheader. */
19298         shdr_status = bf_get(lpfc_mbox_hdr_status,
19299                              &wr_object->header.cfg_shdr.response);
19300         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19301                                  &wr_object->header.cfg_shdr.response);
19302         if (check_change_status) {
19303                 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19304                                             &wr_object->u.response);
19305                 switch (shdr_change_status) {
19306                 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19307                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19308                                         "3198 Firmware write complete: System "
19309                                         "reboot required to instantiate\n");
19310                         break;
19311                 case (LPFC_CHANGE_STATUS_FW_RESET):
19312                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19313                                         "3199 Firmware write complete: Firmware"
19314                                         " reset required to instantiate\n");
19315                         break;
19316                 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19317                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19318                                         "3200 Firmware write complete: Port "
19319                                         "Migration or PCI Reset required to "
19320                                         "instantiate\n");
19321                         break;
19322                 case (LPFC_CHANGE_STATUS_PCI_RESET):
19323                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19324                                         "3201 Firmware write complete: PCI "
19325                                         "Reset required to instantiate\n");
19326                         break;
19327                 default:
19328                         break;
19329                 }
19330         }
19331         if (rc != MBX_TIMEOUT)
19332                 mempool_free(mbox, phba->mbox_mem_pool);
19333         if (shdr_status || shdr_add_status || rc) {
19334                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19335                                 "3025 Write Object mailbox failed with "
19336                                 "status x%x add_status x%x, mbx status x%x\n",
19337                                 shdr_status, shdr_add_status, rc);
19338                 rc = -ENXIO;
19339                 *offset = shdr_add_status;
19340         } else
19341                 *offset += wr_object->u.response.actual_write_length;
19342         return rc;
19343 }
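
/*
 * Sketch of the chunked-download loop implied by @offset (assumptions:
 * process context, and the caller refills the buffers in @dmabuf_list
 * with the next chunk of the image before each call, as the driver's
 * firmware update path does):
 */
static int example_wr_object_all(struct lpfc_hba *phba,
                                 struct list_head *dmabuf_list,
                                 uint32_t image_size)
{
        uint32_t offset = 0;
        int rc = 0;

        while (offset < image_size) {
                /* ... copy image bytes at 'offset' into dmabuf_list ... */
                rc = lpfc_wr_object(phba, dmabuf_list,
                                    image_size - offset, &offset);
                if (rc)
                        break;  /* on error, offset holds the add_status */
        }
        return rc;
}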
19344
19345 /**
19346  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19347  * @vport: pointer to vport data structure.
19348  *
19349  * This function iterates through the mailboxq and cleans up all REG_LOGIN
19350  * and REG_VPI mailbox commands associated with the vport. This function
19351  * is called when the driver wants to restart discovery of the vport due to
19352  * a Clear Virtual Link event.
19353  **/
19354 void
19355 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19356 {
19357         struct lpfc_hba *phba = vport->phba;
19358         LPFC_MBOXQ_t *mb, *nextmb;
19359         struct lpfc_dmabuf *mp;
19360         struct lpfc_nodelist *ndlp;
19361         struct lpfc_nodelist *act_mbx_ndlp = NULL;
19362         struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
19363         LIST_HEAD(mbox_cmd_list);
19364         uint8_t restart_loop;
19365
19366         /* Clean up internally queued mailbox commands with the vport */
19367         spin_lock_irq(&phba->hbalock);
19368         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19369                 if (mb->vport != vport)
19370                         continue;
19371
19372                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19373                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
19374                         continue;
19375
19376                 list_del(&mb->list);
19377                 list_add_tail(&mb->list, &mbox_cmd_list);
19378         }
19379         /* Clean up active mailbox command with the vport */
19380         mb = phba->sli.mbox_active;
19381         if (mb && (mb->vport == vport)) {
19382                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19383                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
19384                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19385                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19386                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19387                         /* Put reference count for delayed processing */
19388                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19389                         /* Unregister the RPI when mailbox complete */
19390                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19391                 }
19392         }
19393         /* Cleanup any mailbox completions which are not yet processed */
19394         do {
19395                 restart_loop = 0;
19396                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19397                         /*
19398                          * If this mailbox is already processed or it is
19399                          * for another vport, ignore it.
19400                          */
19401                         if ((mb->vport != vport) ||
19402                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19403                                 continue;
19404
19405                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19406                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19407                                 continue;
19408
19409                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19410                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19411                                 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19412                                 /* Unregister the RPI when mailbox complete */
19413                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19414                                 restart_loop = 1;
19415                                 spin_unlock_irq(&phba->hbalock);
19416                                 spin_lock(shost->host_lock);
19417                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19418                                 spin_unlock(shost->host_lock);
19419                                 spin_lock_irq(&phba->hbalock);
19420                                 break;
19421                         }
19422                 }
19423         } while (restart_loop);
19424
19425         spin_unlock_irq(&phba->hbalock);
19426
19427         /* Release the cleaned-up mailbox commands */
19428         while (!list_empty(&mbox_cmd_list)) {
19429                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19430                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19431                         mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19432                         if (mp) {
19433                                 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19434                                 kfree(mp);
19435                         }
19436                         mb->ctx_buf = NULL;
19437                         ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19438                         mb->ctx_ndlp = NULL;
19439                         if (ndlp) {
19440                                 spin_lock(shost->host_lock);
19441                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19442                                 spin_unlock(shost->host_lock);
19443                                 lpfc_nlp_put(ndlp);
19444                         }
19445                 }
19446                 mempool_free(mb, phba->mbox_mem_pool);
19447         }
19448
19449         /* Release the ndlp with the cleaned-up active mailbox command */
19450         if (act_mbx_ndlp) {
19451                 spin_lock(shost->host_lock);
19452                 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19453                 spin_unlock(shost->host_lock);
19454                 lpfc_nlp_put(act_mbx_ndlp);
19455         }
19456 }
19457
19458 /**
19459  * lpfc_drain_txq - Drain the txq
19460  * @phba: Pointer to HBA context object.
19461  *
19462  * This function attempts to submit IOCBs on the txq
19463  * to the adapter.  For SLI4 adapters, the txq contains
19464  * ELS IOCBs that have been deferred because there
19465  * are no SGLs.  This congestion can occur with large
19466  * vport counts during node discovery.
19467  **/
19468
19469 uint32_t
19470 lpfc_drain_txq(struct lpfc_hba *phba)
19471 {
19472         LIST_HEAD(completions);
19473         struct lpfc_sli_ring *pring;
19474         struct lpfc_iocbq *piocbq = NULL;
19475         unsigned long iflags = 0;
19476         char *fail_msg = NULL;
19477         struct lpfc_sglq *sglq;
19478         union lpfc_wqe128 wqe;
19479         uint32_t txq_cnt = 0;
19480         struct lpfc_queue *wq;
19481
19482         if (phba->link_flag & LS_MDS_LOOPBACK) {
19483                 /* MDS WQEs are posted only to the first WQ */
19484                 wq = phba->sli4_hba.hdwq[0].io_wq;
19485                 if (unlikely(!wq))
19486                         return 0;
19487                 pring = wq->pring;
19488         } else {
19489                 wq = phba->sli4_hba.els_wq;
19490                 if (unlikely(!wq))
19491                         return 0;
19492                 pring = lpfc_phba_elsring(phba);
19493         }
19494
19495         if (unlikely(!pring) || list_empty(&pring->txq))
19496                 return 0;
19497
19498         spin_lock_irqsave(&pring->ring_lock, iflags);
19499         list_for_each_entry(piocbq, &pring->txq, list) {
19500                 txq_cnt++;
19501         }
19502
19503         if (txq_cnt > pring->txq_max)
19504                 pring->txq_max = txq_cnt;
19505
19506         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19507
19508         while (!list_empty(&pring->txq)) {
19509                 spin_lock_irqsave(&pring->ring_lock, iflags);
19510
19511                 piocbq = lpfc_sli_ringtx_get(phba, pring);
19512                 if (!piocbq) {
19513                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19514                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19515                                 "2823 txq empty and txq_cnt is %d\n ",
19516                                 txq_cnt);
19517                         break;
19518                 }
19519                 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19520                 if (!sglq) {
19521                         __lpfc_sli_ringtx_put(phba, pring, piocbq);
19522                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19523                         break;
19524                 }
19525                 txq_cnt--;
19526
19527                 /* The xri and iocb resources are secured;
19528                  * attempt to issue the request.
19529                  */
19530                 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19531                 piocbq->sli4_xritag = sglq->sli4_xritag;
19532                 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19533                         fail_msg = "to convert bpl to sgl";
19534                 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19535                         fail_msg = "to convert iocb to wqe";
19536                 else if (lpfc_sli4_wq_put(wq, &wqe))
19537                         fail_msg = " - Wq is full";
19538                 else
19539                         lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19540
19541                 if (fail_msg) {
19542                         /* Failed means we can't issue and need to cancel */
19543                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19544                                         "2822 IOCB failed %s iotag 0x%x "
19545                                         "xri 0x%x\n",
19546                                         fail_msg,
19547                                         piocbq->iotag, piocbq->sli4_xritag);
19548                         list_add_tail(&piocbq->list, &completions);
19549                 }
19550                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19551         }
19552
19553         /* Cancel all the IOCBs that cannot be issued */
19554         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19555                                 IOERR_SLI_ABORTED);
19556
19557         return txq_cnt;
19558 }
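
/*
 * Illustrative note on the drain loop above: each txq entry must clear
 * three stages - bpl-to-sgl conversion, iocb-to-wqe conversion, and the
 * WQ post (lpfc_sli4_wq_put). A failure at any stage sets fail_msg, the
 * iocb is queued on the local completions list instead of the txcmplq,
 * and lpfc_sli_cancel_iocbs() completes it back to its owner with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED once the loop exits.
 */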
19559
19560 /**
19561  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
19562  * @phba: Pointer to HBA context object.
19563  * @pwqeq: Pointer to the command WQE.
19564  * @sglq: Pointer to the scatter gather queue object.
19565  *
19566  * This routine converts the bpl or bde that is in the WQE
19567  * to a sgl list for the sli4 hardware. The physical address
19568  * of the bpl/bde is converted back to a virtual address.
19569  * If the WQE contains a BPL then the list of BDEs is
19570  * converted to sli4_sge entries. If the WQE contains a single
19571  * BDE then it is converted to a single sli4_sge.
19572  * The WQE is still in CPU endianness so the contents of
19573  * the bpl can be used without byte swapping.
19574  *
19575  * Returns valid XRI = Success, NO_XRI = Failure.
19576  */
19577 static uint16_t
19578 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19579                  struct lpfc_sglq *sglq)
19580 {
19581         uint16_t xritag = NO_XRI;
19582         struct ulp_bde64 *bpl = NULL;
19583         struct ulp_bde64 bde;
19584         struct sli4_sge *sgl  = NULL;
19585         struct lpfc_dmabuf *dmabuf;
19586         union lpfc_wqe128 *wqe;
19587         int numBdes = 0;
19588         int i = 0;
19589         uint32_t offset = 0; /* accumulated offset in the sg request list */
19590         int inbound = 0; /* number of sg reply entries inbound from firmware */
19591         uint32_t cmd;
19592
19593         if (!pwqeq || !sglq)
19594                 return xritag;
19595
19596         sgl  = (struct sli4_sge *)sglq->sgl;
19597         wqe = &pwqeq->wqe;
19598         pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19599
19600         cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19601         if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19602                 return sglq->sli4_xritag;
19603         numBdes = pwqeq->rsvd2;
19604         if (numBdes) {
19605                 /* The addrHigh and addrLow fields within the WQE
19606                  * have not been byteswapped yet so there is no
19607                  * need to swap them back.
19608                  */
19609                 if (pwqeq->context3)
19610                         dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19611                 else
19612                         return xritag;
19613
19614                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
19615                 if (!bpl)
19616                         return xritag;
19617
19618                 for (i = 0; i < numBdes; i++) {
19619                         /* Should already be byte swapped. */
19620                         sgl->addr_hi = bpl->addrHigh;
19621                         sgl->addr_lo = bpl->addrLow;
19622
19623                         sgl->word2 = le32_to_cpu(sgl->word2);
19624                         if ((i+1) == numBdes)
19625                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
19626                         else
19627                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
19628                         /* swap the size field back to the cpu so we
19629                          * can assign it to the sgl.
19630                          */
19631                         bde.tus.w = le32_to_cpu(bpl->tus.w);
19632                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19633                         /* The offsets in the sgl need to be accumulated
19634                          * separately for the request and reply lists.
19635                          * The request is always first, the reply follows.
19636                          */
19637                         switch (cmd) {
19638                         case CMD_GEN_REQUEST64_WQE:
19639                                 /* add up the reply sg entries */
19640                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19641                                         inbound++;
19642                                 /* first inbound? reset the offset */
19643                                 if (inbound == 1)
19644                                         offset = 0;
19645                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19646                                 bf_set(lpfc_sli4_sge_type, sgl,
19647                                         LPFC_SGE_TYPE_DATA);
19648                                 offset += bde.tus.f.bdeSize;
19649                                 break;
19650                         case CMD_FCP_TRSP64_WQE:
19651                                 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19652                                 bf_set(lpfc_sli4_sge_type, sgl,
19653                                         LPFC_SGE_TYPE_DATA);
19654                                 break;
19655                         case CMD_FCP_TSEND64_WQE:
19656                         case CMD_FCP_TRECEIVE64_WQE:
19657                                 bf_set(lpfc_sli4_sge_type, sgl,
19658                                         bpl->tus.f.bdeFlags);
19659                                 if (i < 3)
19660                                         offset = 0;
19661                                 else
19662                                         offset += bde.tus.f.bdeSize;
19663                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19664                                 break;
19665                         }
19666                         sgl->word2 = cpu_to_le32(sgl->word2);
19667                         bpl++;
19668                         sgl++;
19669                 }
19670         } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19671                 /* The addrHigh and addrLow fields of the BDE have not
19672                  * been byteswapped yet so they need to be swapped
19673                  * before putting them in the sgl.
19674                  */
19675                 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19676                 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19677                 sgl->word2 = le32_to_cpu(sgl->word2);
19678                 bf_set(lpfc_sli4_sge_last, sgl, 1);
19679                 sgl->word2 = cpu_to_le32(sgl->word2);
19680                 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19681         }
19682         return sglq->sli4_xritag;
19683 }
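
/*
 * Worked example for lpfc_wqe_bpl2sgl() (illustrative only): a
 * CMD_GEN_REQUEST64_WQE whose BPL carries two outbound BDEs of 64 and
 * 2048 bytes followed by one inbound (BUFF_TYPE_BDE_64I) BDE of 1024
 * bytes yields three SGEs. The outbound SGEs get offsets 0 and 64
 * (the offset accumulates by bdeSize), the first inbound BDE resets
 * the offset to 0, and only the final SGE has lpfc_sli4_sge_last set.
 */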
19684
19685 /**
19686  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
19687  * @phba: Pointer to HBA context object.
19688  * @qp: Pointer to the hardware queue the WQE is issued on.
19689  * @pwqe: Pointer to command WQE.
19690  **/
19691 int
19692 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19693                     struct lpfc_iocbq *pwqe)
19694 {
19695         union lpfc_wqe128 *wqe = &pwqe->wqe;
19696         struct lpfc_nvmet_rcv_ctx *ctxp;
19697         struct lpfc_queue *wq;
19698         struct lpfc_sglq *sglq;
19699         struct lpfc_sli_ring *pring;
19700         unsigned long iflags;
19701         uint32_t ret = 0;
19702
19703         /* NVME_LS and NVME_LS ABTS requests. */
19704         if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
19705                 pring =  phba->sli4_hba.nvmels_wq->pring;
19706                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19707                                           qp, wq_access);
19708                 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
19709                 if (!sglq) {
19710                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19711                         return WQE_BUSY;
19712                 }
19713                 pwqe->sli4_lxritag = sglq->sli4_lxritag;
19714                 pwqe->sli4_xritag = sglq->sli4_xritag;
19715                 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
19716                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19717                         return WQE_ERROR;
19718                 }
19719                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19720                        pwqe->sli4_xritag);
19721                 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
19722                 if (ret) {
19723                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19724                         return ret;
19725                 }
19726
19727                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19728                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19729                 return 0;
19730         }
19731
19732         /* NVME_FCREQ and NVME_ABTS requests */
19733         if (pwqe->iocb_flag & LPFC_IO_NVME) {
19734                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19735                 wq = qp->io_wq;
19736                 pring = wq->pring;
19737
19738                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19739
19740                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19741                                           qp, wq_access);
19742                 ret = lpfc_sli4_wq_put(wq, wqe);
19743                 if (ret) {
19744                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19745                         return ret;
19746                 }
19747                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19748                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19749                 return 0;
19750         }
19751
19752         /* NVMET requests */
19753         if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19754                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19755                 wq = qp->io_wq;
19756                 pring = wq->pring;
19757
19758                 ctxp = pwqe->context2;
19759                 sglq = ctxp->ctxbuf->sglq;
19760                 if (pwqe->sli4_xritag ==  NO_XRI) {
19761                         pwqe->sli4_lxritag = sglq->sli4_lxritag;
19762                         pwqe->sli4_xritag = sglq->sli4_xritag;
19763                 }
19764                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19765                        pwqe->sli4_xritag);
19766                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19767
19768                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19769                                           qp, wq_access);
19770                 ret = lpfc_sli4_wq_put(wq, wqe);
19771                 if (ret) {
19772                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
19773                         return ret;
19774                 }
19775                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
19776                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19777                 return 0;
19778         }
19779         return WQE_ERROR;
19780 }
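
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * driver): how a caller might push an NVME command WQE through
 * lpfc_sli4_issue_wqe(). Real callers live in lpfc_nvme.c and set up
 * the WQE and iocb_flag during command initialization.
 */
static inline int lpfc_example_issue_nvme_wqe(struct lpfc_hba *phba,
					      struct lpfc_io_buf *lpfc_ncmd)
{
	struct lpfc_sli4_hdw_queue *qp = lpfc_ncmd->hdwq;
	struct lpfc_iocbq *pwqe = &lpfc_ncmd->cur_iocbq;
	int rc;

	pwqe->iocb_flag |= LPFC_IO_NVME;	/* route to qp->io_wq */
	rc = lpfc_sli4_issue_wqe(phba, qp, pwqe);
	if (rc == WQE_BUSY || rc == WQE_ERROR)
		return -EBUSY;		/* caller must retry or fail the IO */
	return 0;
}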
19781
19782 #ifdef LPFC_MXP_STAT
19783 /**
19784  * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
19785  * @phba: pointer to lpfc hba data structure.
19786  * @hwqid: index of the HWQ to snapshot.
19787  *
19788  * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
19789  * 15 seconds after a test case has started running.
19790  *
19791  * The user should call lpfc_debugfs_multixripools_write before running a test
19792  * case to clear stat_snapshot_taken, then start the test case. While the test
19793  * case is running, stat_snapshot_taken is incremented by 1 each time this
19794  * routine is called from the heartbeat timer. When stat_snapshot_taken equals
19795  * LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
19796  **/
19797 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19798 {
19799         struct lpfc_sli4_hdw_queue *qp;
19800         struct lpfc_multixri_pool *multixri_pool;
19801         struct lpfc_pvt_pool *pvt_pool;
19802         struct lpfc_pbl_pool *pbl_pool;
19803         u32 txcmplq_cnt;
19804
19805         qp = &phba->sli4_hba.hdwq[hwqid];
19806         multixri_pool = qp->p_multixri_pool;
19807         if (!multixri_pool)
19808                 return;
19809
19810         if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19811                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19812                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19813                 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
19814
19815                 multixri_pool->stat_pbl_count = pbl_pool->count;
19816                 multixri_pool->stat_pvt_count = pvt_pool->count;
19817                 multixri_pool->stat_busy_count = txcmplq_cnt;
19818         }
19819
19820         multixri_pool->stat_snapshot_taken++;
19821 }
19822 #endif
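
/*
 * Illustrative sketch only: with LPFC_MXP_STAT enabled, a periodic
 * (heartbeat) context could snapshot every HWQ like this. The loop is
 * hypothetical; the real call site is the lpfc heartbeat timer.
 *
 *	u32 hwqid;
 *
 *	for (hwqid = 0; hwqid < phba->cfg_hdw_queue; hwqid++)
 *		lpfc_snapshot_mxp(phba, hwqid);
 */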
19823
19824 /**
19825  * lpfc_adjust_pvt_pool_count - Adjust private pool count
19826  * @phba: pointer to lpfc hba data structure.
19827  * @hwqid: index of the HWQ whose pools are adjusted.
19828  *
19829  * This routine moves some XRIs from private to public pool when private pool
19830  * is not busy.
19831  **/
19832 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
19833 {
19834         struct lpfc_multixri_pool *multixri_pool;
19835         u32 io_req_count;
19836         u32 prev_io_req_count;
19837
19838         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
19839         if (!multixri_pool)
19840                 return;
19841         io_req_count = multixri_pool->io_req_count;
19842         prev_io_req_count = multixri_pool->prev_io_req_count;
19843
19844         if (prev_io_req_count != io_req_count) {
19845                 /* Private pool is busy */
19846                 multixri_pool->prev_io_req_count = io_req_count;
19847         } else {
19848                 /* Private pool is not busy.
19849                  * Move XRIs from private to public pool.
19850                  */
19851                 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19852         }
19853 }
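
/*
 * Illustrative example: if io_req_count read 1000 at the previous
 * heartbeat and still reads 1000 now, no new IO hit this HWQ in the
 * interval. The private pool is therefore idle and its free XRIs are
 * handed back to the public pool for other HWQs to use.
 */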
19854
19855 /**
19856  * lpfc_adjust_high_watermark - Adjust high watermark
19857  * @phba: pointer to lpfc hba data structure.
19858  * @hwqid: index of the HWQ whose watermark is adjusted.
19859  *
19860  * This routine sets the high watermark to the number of outstanding XRIs,
19861  * clamping the new value between xri_limit/2 and xri_limit.
19862  **/
19863 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19864 {
19865         u32 new_watermark;
19866         u32 watermark_max;
19867         u32 watermark_min;
19868         u32 xri_limit;
19869         u32 txcmplq_cnt;
19870         u32 abts_io_bufs;
19871         struct lpfc_multixri_pool *multixri_pool;
19872         struct lpfc_sli4_hdw_queue *qp;
19873
19874         qp = &phba->sli4_hba.hdwq[hwqid];
19875         multixri_pool = qp->p_multixri_pool;
19876         if (!multixri_pool)
19877                 return;
19878         xri_limit = multixri_pool->xri_limit;
19879
19880         watermark_max = xri_limit;
19881         watermark_min = xri_limit / 2;
19882
19883         txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
19884         abts_io_bufs = qp->abts_scsi_io_bufs;
19885         abts_io_bufs += qp->abts_nvme_io_bufs;
19886
19887         new_watermark = txcmplq_cnt + abts_io_bufs;
19888         new_watermark = min(watermark_max, new_watermark);
19889         new_watermark = max(watermark_min, new_watermark);
19890         multixri_pool->pvt_pool.high_watermark = new_watermark;
19891
19892 #ifdef LPFC_MXP_STAT
19893         multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
19894                                           new_watermark);
19895 #endif
19896 }
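
/*
 * Worked example (illustrative only): with xri_limit = 512 the
 * watermark is clamped to [256, 512]. If txcmplq_cnt = 100 and
 * abts_io_bufs = 4, the outstanding count is 104 and the high
 * watermark becomes the bottom of the range:
 *
 *	min(512, 104) = 104, then max(256, 104) = 256
 */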
19897
19898 /**
19899  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
19900  * @phba: pointer to lpfc hba data structure.
19901  * @hwqid: index of the HWQ whose pools are drained.
19902  *
19903  * This routine is called from the heartbeat timer when pvt_pool is idle.
19904  * All free XRIs are moved from the private to the public pool on hwqid in two
19905  * steps. The first step moves (all - low_watermark) XRIs; the second step
19906  * moves the rest.
19907  **/
19908 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
19909 {
19910         struct lpfc_pbl_pool *pbl_pool;
19911         struct lpfc_pvt_pool *pvt_pool;
19912         struct lpfc_sli4_hdw_queue *qp;
19913         struct lpfc_io_buf *lpfc_ncmd;
19914         struct lpfc_io_buf *lpfc_ncmd_next;
19915         unsigned long iflag;
19916         struct list_head tmp_list;
19917         u32 tmp_count;
19918
19919         qp = &phba->sli4_hba.hdwq[hwqid];
19920         pbl_pool = &qp->p_multixri_pool->pbl_pool;
19921         pvt_pool = &qp->p_multixri_pool->pvt_pool;
19922         tmp_count = 0;
19923
19924         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
19925         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
19926
19927         if (pvt_pool->count > pvt_pool->low_watermark) {
19928                 /* Step 1: move (all - low_watermark) from pvt_pool
19929                  * to pbl_pool
19930                  */
19931
19932                 /* Move low watermark of bufs from pvt_pool to tmp_list */
19933                 INIT_LIST_HEAD(&tmp_list);
19934                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
19935                                          &pvt_pool->list, list) {
19936                         list_move_tail(&lpfc_ncmd->list, &tmp_list);
19937                         tmp_count++;
19938                         if (tmp_count >= pvt_pool->low_watermark)
19939                                 break;
19940                 }
19941
19942                 /* Move all bufs from pvt_pool to pbl_pool */
19943                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
19944
19945                 /* Move all bufs from tmp_list to pvt_pool */
19946                 list_splice(&tmp_list, &pvt_pool->list);
19947
19948                 pbl_pool->count += (pvt_pool->count - tmp_count);
19949                 pvt_pool->count = tmp_count;
19950         } else {
19951                 /* Step 2: move the rest from pvt_pool to pbl_pool */
19952                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
19953                 pbl_pool->count += pvt_pool->count;
19954                 pvt_pool->count = 0;
19955         }
19956
19957         spin_unlock(&pvt_pool->lock);
19958         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
19959 }
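
/*
 * Worked example (illustrative only): with pvt_pool->count = 40 and
 * low_watermark = 16, step 1 parks 16 bufs on tmp_list, splices the
 * remaining 24 to pbl_pool and restores the 16 to pvt_pool
 * (pbl += 24, pvt = 16). On a later idle heartbeat the count is no
 * longer above the watermark, so step 2 splices the last 16 across
 * and leaves pvt_pool empty.
 */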
19960
19961 /**
19962  * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
19963  * @phba: pointer to lpfc hba data structure
       * @qp: pointer to the HWQ that owns pvt_pool
19964  * @pbl_pool: specified public free XRI pool
19965  * @pvt_pool: specified private free XRI pool
19966  * @count: number of XRIs to move
19967  *
19968  * This routine tries to move some free common bufs from the specified pbl_pool
19969  * to the specified pvt_pool. It might move fewer than count XRIs if there are
19970  * not enough in the public pool.
19971  *
19972  * Return:
19973  *   true - if XRIs are successfully moved from the specified pbl_pool to the
19974  *          specified pvt_pool
19975  *   false - if the specified pbl_pool is empty or locked by someone else
19976  **/
19977 static bool
19978 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19979                           struct lpfc_pbl_pool *pbl_pool,
19980                           struct lpfc_pvt_pool *pvt_pool, u32 count)
19981 {
19982         struct lpfc_io_buf *lpfc_ncmd;
19983         struct lpfc_io_buf *lpfc_ncmd_next;
19984         unsigned long iflag;
19985         int ret;
19986
19987         ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
19988         if (ret) {
19989                 if (pbl_pool->count) {
19990                         /* Move a batch of XRIs from public to private pool */
19991                         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
19992                         list_for_each_entry_safe(lpfc_ncmd,
19993                                                  lpfc_ncmd_next,
19994                                                  &pbl_pool->list,
19995                                                  list) {
19996                                 list_move_tail(&lpfc_ncmd->list,
19997                                                &pvt_pool->list);
19998                                 pvt_pool->count++;
19999                                 pbl_pool->count--;
20000                                 count--;
20001                                 if (count == 0)
20002                                         break;
20003                         }
20004
20005                         spin_unlock(&pvt_pool->lock);
20006                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20007                         return true;
20008                 }
20009                 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20010         }
20011
20012         return false;
20013 }
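
/*
 * Design note: spin_trylock_irqsave() is deliberate here. The pbl_pool
 * may belong to another HWQ, and blocking on a remote pool's lock from
 * the IO path would serialize HWQs against each other. Failing fast
 * lets lpfc_move_xri_pbl_to_pvt() simply probe the next HWQ's public
 * pool instead.
 */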
20014
20015 /**
20016  * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20017  * @phba: pointer to lpfc hba data structure.
20018  * @hwqid: index of the HWQ whose private pool is refilled.
20019  * @count: number of XRIs to move
20020  *
20021  * This routine tries to find free common bufs in one of the public pools using
20022  * a round-robin method. The search starts with the local hwqid's pool, then
20023  * resumes after the HWQ found last time (rrb_next_hwqid). Once a non-empty
20024  * public pool is found, a batch of free common bufs is moved to the private
20025  * pool on hwqid. It might move fewer than count XRIs if not enough are free.
20026  **/
20027 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20028 {
20029         struct lpfc_multixri_pool *multixri_pool;
20030         struct lpfc_multixri_pool *next_multixri_pool;
20031         struct lpfc_pvt_pool *pvt_pool;
20032         struct lpfc_pbl_pool *pbl_pool;
20033         struct lpfc_sli4_hdw_queue *qp;
20034         u32 next_hwqid;
20035         u32 hwq_count;
20036         int ret;
20037
20038         qp = &phba->sli4_hba.hdwq[hwqid];
20039         multixri_pool = qp->p_multixri_pool;
20040         pvt_pool = &multixri_pool->pvt_pool;
20041         pbl_pool = &multixri_pool->pbl_pool;
20042
20043         /* Check if local pbl_pool is available */
20044         ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20045         if (ret) {
20046 #ifdef LPFC_MXP_STAT
20047                 multixri_pool->local_pbl_hit_count++;
20048 #endif
20049                 return;
20050         }
20051
20052         hwq_count = phba->cfg_hdw_queue;
20053
20054         /* Get the next hwqid which was found last time */
20055         next_hwqid = multixri_pool->rrb_next_hwqid;
20056
20057         do {
20058                 /* Go to next hwq */
20059                 next_hwqid = (next_hwqid + 1) % hwq_count;
20060
20061                 next_multixri_pool =
20062                         phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20063                 pbl_pool = &next_multixri_pool->pbl_pool;
20064
20065                 /* Check if the public free xri pool is available */
20066                 ret = _lpfc_move_xri_pbl_to_pvt(
20067                         phba, qp, pbl_pool, pvt_pool, count);
20068
20069                 /* Exit while-loop if success or all hwqid are checked */
20070         } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20071
20072         /* Starting point for the next time */
20073         multixri_pool->rrb_next_hwqid = next_hwqid;
20074
20075         if (!ret) {
20076                 /* stats: all public pools are empty */
20077                 multixri_pool->pbl_empty_count++;
20078         }
20079
20080 #ifdef LPFC_MXP_STAT
20081         if (ret) {
20082                 if (next_hwqid == hwqid)
20083                         multixri_pool->local_pbl_hit_count++;
20084                 else
20085                         multixri_pool->other_pbl_hit_count++;
20086         }
20087 #endif
20088 }
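
/*
 * Worked example (illustrative only): with cfg_hdw_queue = 4 and
 * rrb_next_hwqid = 1, the local pbl_pool is tried first; if that
 * yields nothing, the loop probes HWQs 2, 3, 0 and finally 1,
 * stopping early at the first pool that supplies XRIs and recording
 * it in rrb_next_hwqid as the starting point for next time.
 */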
20089
20090 /**
20091  * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20092  * @phba: pointer to lpfc hba data structure.
20093  * @hwqid: index of the HWQ whose pvt_pool is replenished.
20094  *
20095  * This routine gets a batch of XRIs from pbl_pool if pvt_pool's count is
20096  * below the low watermark.
20097  **/
20098 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20099 {
20100         struct lpfc_multixri_pool *multixri_pool;
20101         struct lpfc_pvt_pool *pvt_pool;
20102
20103         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20104         pvt_pool = &multixri_pool->pvt_pool;
20105
20106         if (pvt_pool->count < pvt_pool->low_watermark)
20107                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20108 }
20109
20110 /**
20111  * lpfc_release_io_buf - Return one IO buf back to free pool
20112  * @phba: pointer to lpfc hba data structure.
20113  * @lpfc_ncmd: IO buf to be returned.
20114  * @qp: pointer to the HWQ this IO buf belongs to.
20115  *
20116  * This routine returns one IO buf back to a free pool. If this is an urgent
20117  * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
20118  * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20119  * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20120  * lpfc_io_buf_list_put.
20121  **/
20122 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20123                          struct lpfc_sli4_hdw_queue *qp)
20124 {
20125         unsigned long iflag;
20126         struct lpfc_pbl_pool *pbl_pool;
20127         struct lpfc_pvt_pool *pvt_pool;
20128         struct lpfc_epd_pool *epd_pool;
20129         u32 txcmplq_cnt;
20130         u32 xri_owned;
20131         u32 xri_limit;
20132         u32 abts_io_bufs;
20133
20134         /* MUST zero fields if buffer is reused by another protocol */
20135         lpfc_ncmd->nvmeCmd = NULL;
20136         lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20137         lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20138
20139         if (phba->cfg_xpsgl && !phba->nvmet_support &&
20140             !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20141                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20142
20143         if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20144                 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20145
20146         if (phba->cfg_xri_rebalancing) {
20147                 if (lpfc_ncmd->expedite) {
20148                         /* Return to expedite pool */
20149                         epd_pool = &phba->epd_pool;
20150                         spin_lock_irqsave(&epd_pool->lock, iflag);
20151                         list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20152                         epd_pool->count++;
20153                         spin_unlock_irqrestore(&epd_pool->lock, iflag);
20154                         return;
20155                 }
20156
20157                 /* Avoid invalid access if an IO sneaks in and is being rejected
20158                  * just _after_ xri pools are destroyed in lpfc_offline.
20159                  * Nothing much can be done at this point.
20160                  */
20161                 if (!qp->p_multixri_pool)
20162                         return;
20163
20164                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20165                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20166
20167                 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20168                 abts_io_bufs = qp->abts_scsi_io_bufs;
20169                 abts_io_bufs += qp->abts_nvme_io_bufs;
20170
20171                 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20172                 xri_limit = qp->p_multixri_pool->xri_limit;
20173
20174 #ifdef LPFC_MXP_STAT
20175                 if (xri_owned <= xri_limit)
20176                         qp->p_multixri_pool->below_limit_count++;
20177                 else
20178                         qp->p_multixri_pool->above_limit_count++;
20179 #endif
20180
20181                 /* XRI goes to either public or private free xri pool
20182                  *     based on watermark and xri_limit
20183                  */
20184                 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20185                     (xri_owned < xri_limit &&
20186                      pvt_pool->count < pvt_pool->high_watermark)) {
20187                         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20188                                                   qp, free_pvt_pool);
20189                         list_add_tail(&lpfc_ncmd->list,
20190                                       &pvt_pool->list);
20191                         pvt_pool->count++;
20192                         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20193                 } else {
20194                         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20195                                                   qp, free_pub_pool);
20196                         list_add_tail(&lpfc_ncmd->list,
20197                                       &pbl_pool->list);
20198                         pbl_pool->count++;
20199                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20200                 }
20201         } else {
20202                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20203                                           qp, free_xri);
20204                 list_add_tail(&lpfc_ncmd->list,
20205                               &qp->lpfc_io_buf_list_put);
20206                 qp->put_io_bufs++;
20207                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20208                                        iflag);
20209         }
20210 }
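
/*
 * Illustrative summary of the release policy above, assuming
 * cfg_xri_rebalancing == 1:
 *
 *	expedite buf                            -> epd_pool
 *	pvt_pool below low_watermark            -> pvt_pool
 *	owned XRIs under xri_limit and
 *	pvt_pool below high_watermark           -> pvt_pool
 *	otherwise                               -> pbl_pool
 *
 * With cfg_xri_rebalancing == 0 every buf simply goes back on the
 * HWQ's lpfc_io_buf_list_put.
 */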
20211
20212 /**
20213  * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20214  * @phba: pointer to lpfc hba data structure.
       * @qp: pointer to the HWQ that owns pvt_pool.
20215  * @pvt_pool: pointer to private pool data structure.
20216  * @ndlp: pointer to lpfc nodelist data structure.
20217  *
20218  * This routine tries to get one free IO buf from private pool.
20219  *
20220  * Return:
20221  *   pointer to one free IO buf - if private pool is not empty
20222  *   NULL - if private pool is empty
20223  **/
20224 static struct lpfc_io_buf *
20225 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20226                                   struct lpfc_sli4_hdw_queue *qp,
20227                                   struct lpfc_pvt_pool *pvt_pool,
20228                                   struct lpfc_nodelist *ndlp)
20229 {
20230         struct lpfc_io_buf *lpfc_ncmd;
20231         struct lpfc_io_buf *lpfc_ncmd_next;
20232         unsigned long iflag;
20233
20234         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20235         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20236                                  &pvt_pool->list, list) {
20237                 if (lpfc_test_rrq_active(
20238                         phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20239                         continue;
20240                 list_del(&lpfc_ncmd->list);
20241                 pvt_pool->count--;
20242                 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20243                 return lpfc_ncmd;
20244         }
20245         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20246
20247         return NULL;
20248 }
20249
20250 /**
20251  * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20252  * @phba: pointer to lpfc hba data structure.
20253  *
20254  * This routine tries to get one free IO buf from expedite pool.
20255  *
20256  * Return:
20257  *   pointer to one free IO buf - if expedite pool is not empty
20258  *   NULL - if expedite pool is empty
20259  **/
20260 static struct lpfc_io_buf *
20261 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20262 {
20263         struct lpfc_io_buf *lpfc_ncmd;
20264         struct lpfc_io_buf *lpfc_ncmd_next;
20265         unsigned long iflag;
20266         struct lpfc_epd_pool *epd_pool;
20267
20268         epd_pool = &phba->epd_pool;
20269         lpfc_ncmd = NULL;
20270
20271         spin_lock_irqsave(&epd_pool->lock, iflag);
20272         if (epd_pool->count > 0) {
20273                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20274                                          &epd_pool->list, list) {
20275                         list_del(&lpfc_ncmd->list);
20276                         epd_pool->count--;
20277                         break;
20278                 }
20279         }
20280         spin_unlock_irqrestore(&epd_pool->lock, iflag);
20281
20282         return lpfc_ncmd;
20283 }
20284
20285 /**
20286  * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
20287  * @phba: pointer to lpfc hba data structure.
20288  * @ndlp: pointer to lpfc nodelist data structure.
20289  * @hwqid: index of the HWQ to allocate from
20290  * @expedite: 1 means this request is urgent.
20291  *
20292  * This routine will do the following actions and then return a pointer to
20293  * one free IO buf.
20294  *
20295  * 1. If the private free xri pool is empty, move some XRIs from the public
20296  *    to the private pool.
20297  * 2. Get one XRI from private free xri pool.
20298  * 3. If we fail to get one from pvt_pool and this is an expedite request,
20299  *    get one free xri from expedite pool.
20300  *
20301  * Note: ndlp is only used on SCSI side for RRQ testing.
20302  *       The caller should pass NULL for ndlp on NVME side.
20303  *
20304  * Return:
20305  *   pointer to one free IO buf - if one could be obtained
20306  *   NULL - if no free IO buf is available
20307  **/
20308 static struct lpfc_io_buf *
20309 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20310                                     struct lpfc_nodelist *ndlp,
20311                                     int hwqid, int expedite)
20312 {
20313         struct lpfc_sli4_hdw_queue *qp;
20314         struct lpfc_multixri_pool *multixri_pool;
20315         struct lpfc_pvt_pool *pvt_pool;
20316         struct lpfc_io_buf *lpfc_ncmd;
20317
20318         qp = &phba->sli4_hba.hdwq[hwqid];
20319         lpfc_ncmd = NULL;
20320         multixri_pool = qp->p_multixri_pool;
20321         pvt_pool = &multixri_pool->pvt_pool;
20322         multixri_pool->io_req_count++;
20323
20324         /* If pvt_pool is empty, move some XRIs from public to private pool */
20325         if (pvt_pool->count == 0)
20326                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20327
20328         /* Get one XRI from private free xri pool */
20329         lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20330
20331         if (lpfc_ncmd) {
20332                 lpfc_ncmd->hdwq = qp;
20333                 lpfc_ncmd->hdwq_no = hwqid;
20334         } else if (expedite) {
20335                 /* If we fail to get one from pvt_pool and this is an expedite
20336                  * request, get one free xri from expedite pool.
20337                  */
20338                 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20339         }
20340
20341         return lpfc_ncmd;
20342 }
20343
20344 static inline struct lpfc_io_buf *
20345 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20346 {
20347         struct lpfc_sli4_hdw_queue *qp;
20348         struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20349
20350         qp = &phba->sli4_hba.hdwq[idx];
20351         list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20352                                  &qp->lpfc_io_buf_list_get, list) {
20353                 if (lpfc_test_rrq_active(phba, ndlp,
20354                                          lpfc_cmd->cur_iocbq.sli4_lxritag))
20355                         continue;
20356
20357                 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20358                         continue;
20359
20360                 list_del_init(&lpfc_cmd->list);
20361                 qp->get_io_bufs--;
20362                 lpfc_cmd->hdwq = qp;
20363                 lpfc_cmd->hdwq_no = idx;
20364                 return lpfc_cmd;
20365         }
20366         return NULL;
20367 }
20368
20369 /**
20370  * lpfc_get_io_buf - Get one IO buffer from free pool
20371  * @phba: The HBA for which this call is being executed.
20372  * @ndlp: pointer to lpfc nodelist data structure.
20373  * @hwqid: index of the HWQ to allocate from
20374  * @expedite: 1 means this request is urgent.
20375  *
20376  * This routine gets one IO buffer from a free pool. If cfg_xri_rebalancing==1,
20377  * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
20378  * it removes an IO buffer from the head of the HWQ's io_buf_list.
20379  *
20380  * Note: ndlp is only used on SCSI side for RRQ testing.
20381  *       The caller should pass NULL for ndlp on NVME side.
20382  *
20383  * Return codes:
20384  *   NULL - Error
20385  *   Pointer to lpfc_io_buf - Success
20386  **/
20387 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20388                                     struct lpfc_nodelist *ndlp,
20389                                     u32 hwqid, int expedite)
20390 {
20391         struct lpfc_sli4_hdw_queue *qp;
20392         unsigned long iflag;
20393         struct lpfc_io_buf *lpfc_cmd;
20394
20395         qp = &phba->sli4_hba.hdwq[hwqid];
20396         lpfc_cmd = NULL;
20397
20398         if (phba->cfg_xri_rebalancing)
20399                 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20400                         phba, ndlp, hwqid, expedite);
20401         else {
20402                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20403                                           qp, alloc_xri_get);
20404                 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20405                         lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20406                 if (!lpfc_cmd) {
20407                         lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20408                                           qp, alloc_xri_put);
20409                         list_splice(&qp->lpfc_io_buf_list_put,
20410                                     &qp->lpfc_io_buf_list_get);
20411                         qp->get_io_bufs += qp->put_io_bufs;
20412                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20413                         qp->put_io_bufs = 0;
20414                         spin_unlock(&qp->io_buf_list_put_lock);
20415                         if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20416                             expedite)
20417                                 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20418                 }
20419                 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20420         }
20421
20422         return lpfc_cmd;
20423 }
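
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * driver): the allocate/release pattern around lpfc_get_io_buf(). A
 * real caller frees the buf from its completion handler rather than
 * inline like this.
 */
static inline int lpfc_example_try_io(struct lpfc_hba *phba,
				      struct lpfc_nodelist *ndlp, u32 hwqid)
{
	struct lpfc_io_buf *lpfc_cmd;

	/* ndlp is used for RRQ testing on SCSI; pass NULL on NVME side */
	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
	if (!lpfc_cmd)
		return -EBUSY;	/* pools exhausted, retry later */

	/* ... build the WQE and issue it via lpfc_sli4_issue_wqe() ... */

	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
	return 0;
}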
20424
20425 /**
20426  * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
20427  * @phba: The HBA for which this call is being executed.
20428  * @lpfc_buf: IO buf structure to append the SGL chunk
20429  *
20430  * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
20431  * and will allocate an SGL chunk if the pool is empty.
20432  *
20433  * Return codes:
20434  *   NULL - Error
20435  *   Pointer to sli4_hybrid_sgl - Success
20436  **/
20437 struct sli4_hybrid_sgl *
20438 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20439 {
20440         struct sli4_hybrid_sgl *list_entry = NULL;
20441         struct sli4_hybrid_sgl *tmp = NULL;
20442         struct sli4_hybrid_sgl *allocated_sgl = NULL;
20443         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20444         struct list_head *buf_list = &hdwq->sgl_list;
20445         unsigned long iflags;
20446
20447         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20448
20449         if (likely(!list_empty(buf_list))) {
20450                 /* break off 1 chunk from the sgl_list */
20451                 list_for_each_entry_safe(list_entry, tmp,
20452                                          buf_list, list_node) {
20453                         list_move_tail(&list_entry->list_node,
20454                                        &lpfc_buf->dma_sgl_xtra_list);
20455                         break;
20456                 }
20457         } else {
20458                 /* allocate more */
20459                 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20460                 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20461                                    cpu_to_node(smp_processor_id()));
20462                 if (!tmp) {
20463                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20464                                         "8353 error kmalloc memory for HDWQ "
20465                                         "%d %s\n",
20466                                         lpfc_buf->hdwq_no, __func__);
20467                         return NULL;
20468                 }
20469
20470                 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20471                                               GFP_ATOMIC, &tmp->dma_phys_sgl);
20472                 if (!tmp->dma_sgl) {
20473                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20474                                         "8354 error pool_alloc memory for HDWQ "
20475                                         "%d %s\n",
20476                                         lpfc_buf->hdwq_no, __func__);
20477                         kfree(tmp);
20478                         return NULL;
20479                 }
20480
20481                 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20482                 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20483         }
20484
20485         allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20486                                         struct sli4_hybrid_sgl,
20487                                         list_node);
20488
20489         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20490
20491         return allocated_sgl;
20492 }
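
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * driver): borrowing an extra SGL chunk for an oversized IO and
 * returning it when done. The real users are the cfg_xpsgl paths.
 */
static inline int lpfc_example_use_xtra_sgl(struct lpfc_hba *phba,
					    struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *sgl;

	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
	if (!sgl)
		return -ENOMEM;

	/* ... reference sgl->dma_sgl / sgl->dma_phys_sgl in the WQE ... */

	return lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
}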
20493
20494 /**
20495  * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
20496  * @phba: The HBA for which this call is being executed.
20497  * @lpfc_buf: IO buf structure with the SGL chunk
20498  *
20499  * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
20500  *
20501  * Return codes:
20502  *   0 - Success
20503  *   -EINVAL - Error
20504  **/
20505 int
20506 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20507 {
20508         int rc = 0;
20509         struct sli4_hybrid_sgl *list_entry = NULL;
20510         struct sli4_hybrid_sgl *tmp = NULL;
20511         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20512         struct list_head *buf_list = &hdwq->sgl_list;
20513         unsigned long iflags;
20514
20515         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20516
20517         if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
20518                 list_for_each_entry_safe(list_entry, tmp,
20519                                          &lpfc_buf->dma_sgl_xtra_list,
20520                                          list_node) {
20521                         list_move_tail(&list_entry->list_node,
20522                                        buf_list);
20523                 }
20524         } else {
20525                 rc = -EINVAL;
20526         }
20527
20528         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20529         return rc;
20530 }
20531
20532 /**
20533  * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
20534  * @phba: phba object
20535  * @hdwq: hdwq to cleanup sgl buff resources on
20536  *
20537  * This routine frees all SGL chunks of hdwq SGL chunk pool.
20538  *
20539  * Return codes:
20540  *   None
20541  **/
20542 void
20543 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
20544                        struct lpfc_sli4_hdw_queue *hdwq)
20545 {
20546         struct list_head *buf_list = &hdwq->sgl_list;
20547         struct sli4_hybrid_sgl *list_entry = NULL;
20548         struct sli4_hybrid_sgl *tmp = NULL;
20549         unsigned long iflags;
20550
20551         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20552
20553         /* Free sgl pool */
20554         list_for_each_entry_safe(list_entry, tmp,
20555                                  buf_list, list_node) {
20556                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
20557                               list_entry->dma_sgl,
20558                               list_entry->dma_phys_sgl);
20559                 list_del(&list_entry->list_node);
20560                 kfree(list_entry);
20561         }
20562
20563         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20564 }
20565
20566 /**
20567  * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
20568  * @phba: The HBA for which this call is being executed.
20569  * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
20570  *
20571  * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
20572  * and will allocate a CMD/RSP buffer if the pool is empty.
20573  *
20574  * Return codes:
20575  *   NULL - Error
20576  *   Pointer to fcp_cmd_rsp_buf - Success
20577  **/
20578 struct fcp_cmd_rsp_buf *
20579 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20580                               struct lpfc_io_buf *lpfc_buf)
20581 {
20582         struct fcp_cmd_rsp_buf *list_entry = NULL;
20583         struct fcp_cmd_rsp_buf *tmp = NULL;
20584         struct fcp_cmd_rsp_buf *allocated_buf = NULL;
20585         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20586         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20587         unsigned long iflags;
20588
20589         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20590
20591         if (likely(!list_empty(buf_list))) {
20592                 /* break off 1 chunk from the list */
20593                 list_for_each_entry_safe(list_entry, tmp,
20594                                          buf_list,
20595                                          list_node) {
20596                         list_move_tail(&list_entry->list_node,
20597                                        &lpfc_buf->dma_cmd_rsp_list);
20598                         break;
20599                 }
20600         } else {
20601                 /* allocate more */
20602                 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20603                 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20604                                    cpu_to_node(smp_processor_id()));
20605                 if (!tmp) {
20606                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20607                                         "8355 error kmalloc memory for HDWQ "
20608                                         "%d %s\n",
20609                                         lpfc_buf->hdwq_no, __func__);
20610                         return NULL;
20611                 }
20612
20613                 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
20614                                                 GFP_ATOMIC,
20615                                                 &tmp->fcp_cmd_rsp_dma_handle);
20616
20617                 if (!tmp->fcp_cmnd) {
20618                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20619                                         "8356 error pool_alloc memory for HDWQ "
20620                                         "%d %s\n",
20621                                         lpfc_buf->hdwq_no, __func__);
20622                         kfree(tmp);
20623                         return NULL;
20624                 }
20625
20626                 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
20627                                 sizeof(struct fcp_cmnd));
20628
20629                 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20630                 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
20631         }
20632
20633         allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
20634                                         struct fcp_cmd_rsp_buf,
20635                                         list_node);
20636
20637         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20638
20639         return allocated_buf;
20640 }
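
/*
 * Layout note: each fcp_cmd_rsp_buf is a single dma_pool allocation
 * that holds the FCP_CMND IU immediately followed by the FCP_RSP IU;
 * fcp_rsp is derived by pointer arithmetic above, so the response's
 * DMA address is fcp_cmd_rsp_dma_handle + sizeof(struct fcp_cmnd).
 */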
20641
20642 /**
20643  * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
20644  * @phba: The HBA for which this call is being executed.
20645  * @lpfc_buf: IO buf structure with the CMD/RSP buf
20646  *
20647  * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
20648  *
20649  * Return codes:
20650  *   0 - Success
20651  *   -EINVAL - Error
20652  **/
20653 int
20654 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20655                               struct lpfc_io_buf *lpfc_buf)
20656 {
20657         int rc = 0;
20658         struct fcp_cmd_rsp_buf *list_entry = NULL;
20659         struct fcp_cmd_rsp_buf *tmp = NULL;
20660         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20661         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20662         unsigned long iflags;
20663
20664         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20665
20666         if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
20667                 list_for_each_entry_safe(list_entry, tmp,
20668                                          &lpfc_buf->dma_cmd_rsp_list,
20669                                          list_node) {
20670                         list_move_tail(&list_entry->list_node,
20671                                        buf_list);
20672                 }
20673         } else {
20674                 rc = -EINVAL;
20675         }
20676
20677         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20678         return rc;
20679 }
20680
20681 /**
20682  * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
20683  * @phba: phba object
20684  * @hdwq: hdwq to cleanup cmd rsp buff resources on
20685  *
20686  * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
20687  *
20688  * Return codes:
20689  *   None
20690  **/
20691 void
20692 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20693                                struct lpfc_sli4_hdw_queue *hdwq)
20694 {
20695         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20696         struct fcp_cmd_rsp_buf *list_entry = NULL;
20697         struct fcp_cmd_rsp_buf *tmp = NULL;
20698         unsigned long iflags;
20699
20700         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20701
20702         /* Free cmd_rsp buf pool */
20703         list_for_each_entry_safe(list_entry, tmp,
20704                                  buf_list,
20705                                  list_node) {
20706                 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
20707                               list_entry->fcp_cmnd,
20708                               list_entry->fcp_cmd_rsp_dma_handle);
20709                 list_del(&list_entry->list_node);
20710                 kfree(list_entry);
20711         }
20712
20713         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20714 }