/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include <asm/set_memory.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * locks.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

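/*
 * A note on the fallback above (illustrative sketch, not driver code): on a
 * host whose native endianness differs from SLI endianness, each 32-bit
 * word must be swapped while copying, conceptually:
 *
 *	uint32_t *s = srcp, *d = destp;
 *	for (i = 0; i < cnt; i += sizeof(uint32_t))
 *		*d++ = cpu_to_le32(*s++);
 *
 * On 64-bit little-endian builds no swap is needed, which is why the
 * straight uint64_t copy above is both correct and faster.
 */
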
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

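/*
 * Usage sketch (illustrative only; assumes an initialized WQ and a fully
 * built 128-byte WQE). Per the contract documented above, callers post
 * while holding the hbalock:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc)
 *		... queue was full or invalid; fail or retry the request ...
 */
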
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

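/*
 * Worked example (illustrative): with entry_count = 256, hba_index = 250
 * and a completion reporting @index = 2, the loop above walks 251, 252,
 * ..., 255, 0, 1, 2 and returns 8, i.e.
 * ((index - hba_index + entry_count) % entry_count).
 */
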
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

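/*
 * Usage sketch (illustrative only; @mq stands for the adapter's mailbox
 * queue and @mqe for a fully built mailbox queue entry, both assumptions
 * of this example). As with the WQ, the post is done under the hbalock:
 *
 *	rc = lpfc_sli4_mq_put(mq, mqe);
 *	if (rc)
 *		... MQ full; a mailbox command is already outstanding ...
 */
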
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/*
	 * insert barrier for instruction interlock: data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}

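/*
 * Consumer sketch (illustrative; process_eqe() is a stand-in name for the
 * real per-entry handling in this driver):
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL) {
 *		process_eqe(phba, eqe);
 *		ecount++;
 *	}
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */
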
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates whether the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}

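/*
 * Arming sketch (illustrative): interrupt-driven callers typically release
 * with LPFC_QUEUE_NOARM while they still expect to poll more entries, and
 * with LPFC_QUEUE_REARM on the final pass so the EQ can interrupt again:
 *
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);	- mid-processing
 *	...
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);	- done, re-enable
 */
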
/**
 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates whether the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	cqe = q->qe[q->hba_index].cqe;

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/*
	 * insert barrier for instruction interlock: data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}

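/*
 * Worked example of the valid-bit scheme (illustrative): with
 * entry_count = 4 and qe_valid = 1, the HBA writes entries 0..3 with the
 * valid bit set to 1 on the first lap. When hba_index wraps back to 0,
 * qe_valid toggles to 0, so a stale first-lap entry (still marked 1) no
 * longer compares equal and is not mistaken for new work.
 */
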
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates whether the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates whether the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = hq->qe[hq_put_index].rqe;
	temp_drqe = dq->qe[dq_put_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

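/*
 * Usage sketch (illustrative; assumes hbalock held and DMA-mapped header
 * and data buffers named hbuf/dbuf). Header and data RQEs are posted as a
 * pair so the two queues stay in lock step:
 *
 *	hrqe.address_lo = putPaddrLow(hbuf->phys);
 *	hrqe.address_hi = putPaddrHigh(hbuf->phys);
 *	drqe.address_lo = putPaddrLow(dbuf->phys);
 *	drqe.address_hi = putPaddrHigh(dbuf->phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *	if (rc < 0)
 *		... post failed; unwind the buffers ...
 */
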
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

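/*
 * Worked example (illustrative): with iocb_cmd_size = 32 bytes and
 * cmdidx = 3, the pointer returned above is cmdringaddr + 96, i.e. the
 * fourth fixed-size IOCB slot in the command ring.
 */
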
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * enabled.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq))
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

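/*
 * Usage sketch (illustrative; mirrors how abort paths quarantine an
 * exchange). After aborting an I/O the xri is marked active for RATOV so
 * it is not reused until the RRQ completes:
 *
 *	if (lpfc_set_rrq_active(phba, ndlp, iocbq->sli4_lxritag, rxid, 1))
 *		... xri could not be quarantined; no RRQ will be sent ...
 */
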
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

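/*
 * Usage sketch (illustrative; this mirrors how ring flush paths use the
 * routine). Pending iocbs are spliced onto a local list and completed
 * with an abort status:
 *
 *	LIST_HEAD(completions);
 *	list_splice_init(&pring->txq, &completions);
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */
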
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error number.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
int
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG. last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

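/*
 * Worked example (illustrative, assuming for the example that
 * LPFC_IOCBQ_LOOKUP_INCREMENT is 1024): a lookup array of 1024 slots grows
 * to 2048 when iotag 1024 is requested; existing entries are copied
 * across, so an iotag remains valid for the life of its iocbq.
 */
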
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);

	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

1882 * lpfc_sli_resume_iocb - Process iocbs in the txq
1883 * @phba: Pointer to HBA context object.
1884 * @pring: Pointer to driver SLI ring object.
1886 * This function is called with hbalock held to post pending iocbs
1887 * in the txq to the firmware. This function is called when driver
1888 * detects space available in the ring.
1891 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1893 IOCB_t *iocb;
1894 struct lpfc_iocbq *nextiocb;
1896 lockdep_assert_held(&phba->hbalock);
1900 * (a) there is anything on the txq to send
1901 * (b) the link is up
1902 * (c) link attention events can be processed (fcp ring only)
1903 * (d) IOCB processing is not blocked by the outstanding mbox command.
1906 if (lpfc_is_link_up(phba) &&
1907 (!list_empty(&pring->txq)) &&
1908 (pring->ringno != LPFC_FCP_RING ||
1909 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1911 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1912 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1913 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1915 if (iocb)
1916 lpfc_sli_update_ring(phba, pring);
1917 else
1918 lpfc_sli_update_full_ring(phba, pring);
1919 }
1921 return;
1922 }
1925 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1926 * @phba: Pointer to HBA context object.
1927 * @hbqno: HBQ number.
1929 * This function is called with hbalock held to get the next
1930 * available slot for the given HBQ. If there is a free slot
1931 * available for the HBQ it will return a pointer to the next available
1932 * HBQ entry; otherwise it will return NULL.
1934 static struct lpfc_hbq_entry *
1935 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1937 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1939 lockdep_assert_held(&phba->hbalock);
1941 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1942 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1943 hbqp->next_hbqPutIdx = 0;
1945 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1946 uint32_t raw_index = phba->hbq_get[hbqno];
1947 uint32_t getidx = le32_to_cpu(raw_index);
1949 hbqp->local_hbqGetIdx = getidx;
1951 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1952 lpfc_printf_log(phba, KERN_ERR,
1953 LOG_SLI | LOG_VPORT,
1954 "1802 HBQ %d: local_hbqGetIdx "
1955 "%u is > than hbqp->entry_count %u\n",
1956 hbqno, hbqp->local_hbqGetIdx,
1957 hbqp->entry_count);
1959 phba->link_state = LPFC_HBA_ERROR;
1960 return NULL;
1963 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1964 return NULL;
1967 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1968 hbqp->hbqPutIdx;
1969 }
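/*
 * Illustrative sketch (added commentary, not driver code): the HBQ put
 * and get indices form a classic ring in which "get == next put" means
 * full, so one slot is always sacrificed to tell full from empty.  A
 * minimal model of the check performed above:
 *
 *	static bool hbq_would_be_full(uint32_t getidx, uint32_t putidx,
 *				      uint32_t entries)
 *	{
 *		uint32_t next = (putidx + 1 >= entries) ? 0 : putidx + 1;
 *		return getidx == next;	// no slot left for a new entry
 *	}
 */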
1972 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1973 * @phba: Pointer to HBA context object.
1975 * This function is called with no lock held to free all the
1976 * hbq buffers while uninitializing the SLI interface. It also
1977 * frees the HBQ buffers returned by the firmware but not yet
1978 * processed by the upper layers.
1981 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1983 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1984 struct hbq_dmabuf *hbq_buf;
1985 unsigned long flags;
1986 int i, hbq_count;
1988 hbq_count = lpfc_sli_hbq_count();
1989 /* Return all memory used by all HBQs */
1990 spin_lock_irqsave(&phba->hbalock, flags);
1991 for (i = 0; i < hbq_count; ++i) {
1992 list_for_each_entry_safe(dmabuf, next_dmabuf,
1993 &phba->hbqs[i].hbq_buffer_list, list) {
1994 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1995 list_del(&hbq_buf->dbuf.list);
1996 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1998 phba->hbqs[i].buffer_count = 0;
2001 /* Mark the HBQs not in use */
2002 phba->hbq_in_use = 0;
2003 spin_unlock_irqrestore(&phba->hbalock, flags);
2007 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2008 * @phba: Pointer to HBA context object.
2009 * @hbqno: HBQ number.
2010 * @hbq_buf: Pointer to HBQ buffer.
2012 * This function is called with the hbalock held to post a
2013 * hbq buffer to the firmware. If the function finds an empty
2014 * slot in the HBQ, it will post the buffer. The function will return
2015 * zero if it successfully posts the buffer; otherwise it will return
2016 * an error code.
2019 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2020 struct hbq_dmabuf *hbq_buf)
2022 lockdep_assert_held(&phba->hbalock);
2023 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2027 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2028 * @phba: Pointer to HBA context object.
2029 * @hbqno: HBQ number.
2030 * @hbq_buf: Pointer to HBQ buffer.
2032 * This function is called with the hbalock held to post a hbq buffer to the
2033 * firmware. If the function finds an empty slot in the HBQ, it will post the
2034 * buffer and place it on the hbq_buffer_list. The function will return zero if
2035 * it successfully posts the buffer; otherwise it will return an error.
2038 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2039 struct hbq_dmabuf *hbq_buf)
2041 struct lpfc_hbq_entry *hbqe;
2042 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2044 lockdep_assert_held(&phba->hbalock);
2045 /* Get next HBQ entry slot to use */
2046 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2047 if (hbqe) {
2048 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2050 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2051 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2052 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2053 hbqe->bde.tus.f.bdeFlags = 0;
2054 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2055 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2057 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2058 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2060 readl(phba->hbq_put + hbqno);
2061 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2062 return 0;
2063 } else
2064 return -ENOMEM;
2065 }
2068 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2069 * @phba: Pointer to HBA context object.
2070 * @hbqno: HBQ number.
2071 * @hbq_buf: Pointer to HBQ buffer.
2073 * This function is called with the hbalock held to post an RQE to the SLI4
2074 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2075 * the hbq_buffer_list and return zero, otherwise it will return an error.
2078 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2079 struct hbq_dmabuf *hbq_buf)
2081 int rc;
2082 struct lpfc_rqe hrqe;
2083 struct lpfc_rqe drqe;
2084 struct lpfc_queue *hrq;
2085 struct lpfc_queue *drq;
2087 if (hbqno != LPFC_ELS_HBQ)
2088 return 1;
2089 hrq = phba->sli4_hba.hdr_rq;
2090 drq = phba->sli4_hba.dat_rq;
2092 lockdep_assert_held(&phba->hbalock);
2093 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2094 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2095 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2096 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2097 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2098 if (rc < 0)
2099 return rc;
2100 hbq_buf->tag = (rc | (hbqno << 16));
2101 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2102 return 0;
2103 }
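/*
 * Illustrative sketch (added commentary, not driver code): an HBQ
 * buffer tag is a 32-bit value carrying the HBQ number in the upper
 * 16 bits and the buffer index (here the RQE index returned by
 * lpfc_sli4_rq_put) in the lower 16 bits:
 *
 *	uint32_t tag = (hbqno << 16) | index;	// encode
 *	uint32_t q   = tag >> 16;		// recover HBQ number
 *	uint32_t idx = tag & 0xffff;		// recover buffer index
 *
 * lpfc_sli_hbqbuf_find() below depends on this layout when it derives
 * the HBQ number from a tag.
 */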
2105 /* HBQ for ELS and CT traffic. */
2106 static struct lpfc_hbq_init lpfc_els_hbq = {
2111 .ring_mask = (1 << LPFC_ELS_RING),
2118 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2119 &lpfc_els_hbq,
2120 };
2123 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2124 * @phba: Pointer to HBA context object.
2125 * @hbqno: HBQ number.
2126 * @count: Number of HBQ buffers to be posted.
2128 * This function is called with no lock held to post more hbq buffers to the
2129 * given HBQ. The function returns the number of HBQ buffers successfully
2130 * posted.
2133 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2135 uint32_t i, posted = 0;
2136 unsigned long flags;
2137 struct hbq_dmabuf *hbq_buffer;
2138 LIST_HEAD(hbq_buf_list);
2139 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2140 return 0;
2142 if ((phba->hbqs[hbqno].buffer_count + count) >
2143 lpfc_hbq_defs[hbqno]->entry_count)
2144 count = lpfc_hbq_defs[hbqno]->entry_count -
2145 phba->hbqs[hbqno].buffer_count;
2148 /* Allocate HBQ entries */
2149 for (i = 0; i < count; i++) {
2150 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2151 if (!hbq_buffer)
2152 break;
2153 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2154 }
2155 /* Check whether HBQ is still in use */
2156 spin_lock_irqsave(&phba->hbalock, flags);
2157 if (!phba->hbq_in_use)
2158 goto err;
2159 while (!list_empty(&hbq_buf_list)) {
2160 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2161 dbuf.list);
2162 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2163 (hbqno << 16));
2164 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2165 phba->hbqs[hbqno].buffer_count++;
2166 posted++;
2167 } else
2168 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2169 }
2170 spin_unlock_irqrestore(&phba->hbalock, flags);
2171 return posted;
2172 err:
2173 spin_unlock_irqrestore(&phba->hbalock, flags);
2174 while (!list_empty(&hbq_buf_list)) {
2175 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2176 dbuf.list);
2177 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2178 }
2179 return 0;
2180 }
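/*
 * Illustrative sketch (added commentary, not driver code): note the
 * allocate-then-commit shape of lpfc_sli_hbqbuf_fill_hbqs() above.
 * Buffer allocation, the slow part, runs with no lock held onto a
 * private list; only the short posting phase runs under hbalock, and
 * the hbq_in_use recheck under the lock catches a teardown that raced
 * with the unlocked allocation:
 *
 *	allocate count buffers onto hbq_buf_list;	// no lock held
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	if (!phba->hbq_in_use)				// raced with teardown
 *		goto err;				// unlock, free them all
 *	post each buffer to firmware or free it;	// short critical section
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */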
2183 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2184 * @phba: Pointer to HBA context object.
2187 * This function posts more buffers to the HBQ. This function
2188 * is called with no lock held. The function returns the number of HBQ entries
2189 * successfully allocated.
2192 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2194 if (phba->sli_rev == LPFC_SLI_REV4)
2195 return 0;
2196 else
2197 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2198 lpfc_hbq_defs[qno]->add_count);
2202 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2203 * @phba: Pointer to HBA context object.
2204 * @qno: HBQ queue number.
2206 * This function is called from SLI initialization code path with
2207 * no lock held to post initial HBQ buffers to firmware. The
2208 * function returns the number of HBQ entries successfully allocated.
2211 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2213 if (phba->sli_rev == LPFC_SLI_REV4)
2214 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2215 lpfc_hbq_defs[qno]->entry_count);
2216 else
2217 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2218 lpfc_hbq_defs[qno]->init_count);
2222 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2223 * @rb_list: Pointer to the driver hbq buffer list to take the buffer from.
2226 * This function removes the first hbq buffer on an hbq list and returns a
2227 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2229 static struct hbq_dmabuf *
2230 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2232 struct lpfc_dmabuf *d_buf;
2234 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2235 if (!d_buf)
2236 return NULL;
2237 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2238 }
2241 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2242 * @phba: Pointer to HBA context object.
2243 * @hrq: Pointer to the receive queue to take the buffer from.
2245 * This function removes the first RQ buffer on an RQ buffer list and returns a
2246 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2248 static struct rqb_dmabuf *
2249 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2251 struct lpfc_dmabuf *h_buf;
2252 struct lpfc_rqb *rqbp;
2253 rqbp = hrq->rqbp;
2255 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2256 struct lpfc_dmabuf, list);
2257 if (!h_buf)
2258 return NULL;
2259 rqbp->buffer_count--;
2260 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2264 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2265 * @phba: Pointer to HBA context object.
2266 * @tag: Tag of the hbq buffer.
2268 * This function searches for the hbq buffer associated with the given tag in
2269 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2270 * otherwise it returns NULL.
2272 static struct hbq_dmabuf *
2273 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2275 struct lpfc_dmabuf *d_buf;
2276 struct hbq_dmabuf *hbq_buf;
2277 uint32_t hbqno;
2279 hbqno = tag >> 16;
2280 if (hbqno >= LPFC_MAX_HBQS)
2281 return NULL;
2283 spin_lock_irq(&phba->hbalock);
2284 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2285 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2286 if (hbq_buf->tag == tag) {
2287 spin_unlock_irq(&phba->hbalock);
2288 return hbq_buf;
2289 }
2290 }
2291 spin_unlock_irq(&phba->hbalock);
2292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2293 "1803 Bad hbq tag. Data: x%x x%x\n",
2294 tag, phba->hbqs[tag >> 16].buffer_count);
2295 return NULL;
2296 }
2299 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2300 * @phba: Pointer to HBA context object.
2301 * @hbq_buffer: Pointer to HBQ buffer.
2303 * This function is called with the hbalock held. This function gives back
2304 * the hbq buffer to firmware. If the HBQ does not have space to
2305 * post the buffer, it will free the buffer.
2308 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2309 {
2310 uint32_t hbqno;
2312 if (hbq_buffer) {
2313 hbqno = hbq_buffer->tag >> 16;
2314 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2315 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2316 }
2317 }
2320 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2321 * @mbxCommand: mailbox command code.
2323 * This function is called by the mailbox event handler function to verify
2324 * that the completed mailbox command is a legitimate mailbox command. If the
2325 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2326 * and the mailbox event handler will take the HBA offline.
2329 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2330 {
2331 uint8_t ret;
2333 switch (mbxCommand) {
2337 case MBX_WRITE_VPARMS:
2338 case MBX_RUN_BIU_DIAG:
2341 case MBX_CONFIG_LINK:
2342 case MBX_CONFIG_RING:
2343 case MBX_RESET_RING:
2344 case MBX_READ_CONFIG:
2345 case MBX_READ_RCONFIG:
2346 case MBX_READ_SPARM:
2347 case MBX_READ_STATUS:
2351 case MBX_READ_LNK_STAT:
2353 case MBX_UNREG_LOGIN:
2355 case MBX_DUMP_MEMORY:
2356 case MBX_DUMP_CONTEXT:
2359 case MBX_UPDATE_CFG:
2361 case MBX_DEL_LD_ENTRY:
2362 case MBX_RUN_PROGRAM:
2364 case MBX_SET_VARIABLE:
2365 case MBX_UNREG_D_ID:
2366 case MBX_KILL_BOARD:
2367 case MBX_CONFIG_FARP:
2370 case MBX_RUN_BIU_DIAG64:
2371 case MBX_CONFIG_PORT:
2372 case MBX_READ_SPARM64:
2373 case MBX_READ_RPI64:
2374 case MBX_REG_LOGIN64:
2375 case MBX_READ_TOPOLOGY:
2378 case MBX_LOAD_EXP_ROM:
2379 case MBX_ASYNCEVT_ENABLE:
2383 case MBX_PORT_CAPABILITIES:
2384 case MBX_PORT_IOV_CONTROL:
2385 case MBX_SLI4_CONFIG:
2386 case MBX_SLI4_REQ_FTRS:
2388 case MBX_UNREG_FCFI:
2393 case MBX_RESUME_RPI:
2394 case MBX_READ_EVENT_LOG_STATUS:
2395 case MBX_READ_EVENT_LOG:
2396 case MBX_SECURITY_MGMT:
2398 case MBX_ACCESS_VDATA:
2399 ret = mbxCommand;
2400 break;
2401 default:
2402 ret = MBX_SHUTDOWN;
2403 break;
2404 }
2405 return ret;
2406 }
2409 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2410 * @phba: Pointer to HBA context object.
2411 * @pmboxq: Pointer to mailbox command.
2413 * This is the completion handler for mailbox commands issued by the
2414 * lpfc_sli_issue_mbox_wait function. This function is called by the
2415 * mailbox event handler function with no lock held. This function
2416 * will wake up the thread waiting on the completion pointed to by the
2417 * context3 field of the mailbox.
2420 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2422 unsigned long drvr_flag;
2423 struct completion *pmbox_done;
2426 * If pmbox_done is empty, the driver thread gave up waiting and
2427 * continued running.
2429 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2430 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2431 pmbox_done = (struct completion *)pmboxq->context3;
2432 if (pmbox_done)
2433 complete(pmbox_done);
2434 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2435 }
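/*
 * Illustrative sketch (added commentary, not driver code): the waiter
 * side pairs with the handler above roughly as follows (assumed shape
 * of lpfc_sli_issue_mbox_wait, shown only to make the hand-off clear):
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->context3 = &mbox_done;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_for_completion_timeout(&mbox_done, timeout);
 *	spin_lock_irqsave(&phba->hbalock, flag);
 *	pmboxq->context3 = NULL;	// a late completion must not use it
 *	spin_unlock_irqrestore(&phba->hbalock, flag);
 *
 * Clearing context3 under hbalock is what makes the "gave up waiting"
 * case in the handler above safe.
 */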
2440 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2441 * @phba: Pointer to HBA context object.
2442 * @pmb: Pointer to mailbox object.
2444 * This function is the default mailbox completion handler. It
2445 * frees the memory resources associated with the completed mailbox
2446 * command. If the completed command is a REG_LOGIN mailbox command,
2447 * this function will issue an UNREG_LOGIN to reclaim the RPI.
2450 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2452 struct lpfc_vport *vport = pmb->vport;
2453 struct lpfc_dmabuf *mp;
2454 struct lpfc_nodelist *ndlp;
2455 struct Scsi_Host *shost;
2456 uint16_t rpi, vpi;
2457 int rc;
2459 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2461 if (mp) {
2462 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2463 kfree(mp);
2464 }
2467 * If a REG_LOGIN succeeded after the node was destroyed or the node
2468 * is in re-discovery, the driver needs to clean up the RPI.
2470 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2471 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2472 !pmb->u.mb.mbxStatus) {
2473 rpi = pmb->u.mb.un.varWords[0];
2474 vpi = pmb->u.mb.un.varRegLogin.vpi;
2475 lpfc_unreg_login(phba, vpi, rpi, pmb);
2477 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2478 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2479 if (rc != MBX_NOT_FINISHED)
2480 return;
2481 }
2483 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2484 !(phba->pport->load_flag & FC_UNLOADING) &&
2485 !pmb->u.mb.mbxStatus) {
2486 shost = lpfc_shost_from_vport(vport);
2487 spin_lock_irq(shost->host_lock);
2488 vport->vpi_state |= LPFC_VPI_REGISTERED;
2489 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2490 spin_unlock_irq(shost->host_lock);
2493 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2494 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2495 lpfc_nlp_put(ndlp);
2496 pmb->ctx_buf = NULL;
2497 pmb->ctx_ndlp = NULL;
2498 }
2500 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2501 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2503 /* Check to see if there are any deferred events to process */
2504 if (ndlp) {
2505 lpfc_printf_vlog(
2506 vport,
2507 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2508 "1438 UNREG cmpl deferred mbox x%x "
2509 "on NPort x%x Data: x%x x%x %p\n",
2510 ndlp->nlp_rpi, ndlp->nlp_DID,
2511 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2513 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2514 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2515 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2516 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2517 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2518 } else {
2519 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2520 }
2521 }
2522 pmb->ctx_ndlp = NULL;
2523 }
2525 /* Check security permission status on INIT_LINK mailbox command */
2526 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2527 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2528 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2529 "2860 SLI authentication is required "
2530 "for INIT_LINK but has not done yet\n");
2532 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2533 lpfc_sli4_mbox_cmd_free(phba, pmb);
2534 else
2535 mempool_free(pmb, phba->mbox_mem_pool);
2536 }
2538 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2539 * @phba: Pointer to HBA context object.
2540 * @pmb: Pointer to mailbox object.
2542 * This function is the unreg rpi mailbox completion handler. It
2543 * frees the memory resources associated with the completed mailbox
2544 * command. An additional reference is put on the ndlp to prevent
2545 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2546 * the unreg mailbox command completes; this routine then drops that
2547 * reference once the command has completed.
2551 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2553 struct lpfc_vport *vport = pmb->vport;
2554 struct lpfc_nodelist *ndlp;
2556 ndlp = pmb->ctx_ndlp;
2557 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2558 if (phba->sli_rev == LPFC_SLI_REV4 &&
2559 (bf_get(lpfc_sli_intf_if_type,
2560 &phba->sli4_hba.sli_intf) >=
2561 LPFC_SLI_INTF_IF_TYPE_2)) {
2562 if (ndlp) {
2563 lpfc_printf_vlog(
2564 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2565 "0010 UNREG_LOGIN vpi:%x "
2566 "rpi:%x DID:%x defer x%x flg x%x "
2568 vport->vpi, ndlp->nlp_rpi,
2569 ndlp->nlp_DID, ndlp->nlp_defer_did,
2571 ndlp->nlp_usg_map, ndlp);
2572 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2575 /* Check to see if there are any deferred
2576 * events to process
2577 */
2578 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2579 (ndlp->nlp_defer_did !=
2580 NLP_EVT_NOTHING_PENDING)) {
2582 vport, KERN_INFO, LOG_DISCOVERY,
2583 "4111 UNREG cmpl deferred "
2585 "NPort x%x Data: x%x %p\n",
2586 ndlp->nlp_rpi, ndlp->nlp_DID,
2587 ndlp->nlp_defer_did, ndlp);
2588 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2589 ndlp->nlp_defer_did =
2590 NLP_EVT_NOTHING_PENDING;
2591 lpfc_issue_els_plogi(
2592 vport, ndlp->nlp_DID, 0);
2593 } else {
2594 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2595 }
2596 }
2597 }
2598 }
2600 mempool_free(pmb, phba->mbox_mem_pool);
2601 }
2604 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2605 * @phba: Pointer to HBA context object.
2607 * This function is called with no lock held. This function processes all
2608 * the completed mailbox commands and hands them to the upper layers. The
2609 * interrupt service routine processes the mailbox completion interrupt and
2610 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
2611 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
2612 * returns the completed mailbox commands in the mboxq_cmpl queue to the
2613 * upper layers by calling the completion handler function of each mailbox.
2617 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2618 {
2619 MAILBOX_t *pmbox;
2620 LPFC_MBOXQ_t *pmb;
2621 int rc;
2622 LIST_HEAD(cmplq);
2624 phba->sli.slistat.mbox_event++;
2626 /* Get all completed mailbox buffers into the cmplq */
2627 spin_lock_irq(&phba->hbalock);
2628 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2629 spin_unlock_irq(&phba->hbalock);
2631 /* Get a Mailbox buffer to setup mailbox commands for callback */
2632 do {
2633 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2634 if (pmb == NULL)
2635 break;
2637 pmbox = &pmb->u.mb;
2639 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2640 if (pmb->vport) {
2641 lpfc_debugfs_disc_trc(pmb->vport,
2642 LPFC_DISC_TRC_MBOX_VPORT,
2643 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2644 (uint32_t)pmbox->mbxCommand,
2645 pmbox->un.varWords[0],
2646 pmbox->un.varWords[1]);
2647 }
2648 else {
2649 lpfc_debugfs_disc_trc(phba->pport,
2650 LPFC_DISC_TRC_MBOX,
2651 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2652 (uint32_t)pmbox->mbxCommand,
2653 pmbox->un.varWords[0],
2654 pmbox->un.varWords[1]);
2655 }
2656 }
2659 * It is a fatal error if an unknown mailbox command completes.
2661 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2662 MBX_SHUTDOWN) {
2663 /* Unknown mailbox command compl */
2664 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2665 "(%d):0323 Unknown Mailbox command "
2666 "x%x (x%x/x%x) Cmpl\n",
2667 pmb->vport ? pmb->vport->vpi : 0,
2668 pmbox->mbxCommand,
2669 lpfc_sli_config_mbox_subsys_get(phba,
2670 pmb),
2671 lpfc_sli_config_mbox_opcode_get(phba,
2672 pmb));
2673 phba->link_state = LPFC_HBA_ERROR;
2674 phba->work_hs = HS_FFER3;
2675 lpfc_handle_eratt(phba);
2676 continue;
2677 }
2679 if (pmbox->mbxStatus) {
2680 phba->sli.slistat.mbox_stat_err++;
2681 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2682 /* Mbox cmd cmpl error - RETRYing */
2683 lpfc_printf_log(phba, KERN_INFO,
2685 "(%d):0305 Mbox cmd cmpl "
2686 "error - RETRYing Data: x%x "
2687 "(x%x/x%x) x%x x%x x%x\n",
2688 pmb->vport ? pmb->vport->vpi : 0,
2689 pmbox->mbxCommand,
2690 lpfc_sli_config_mbox_subsys_get(phba,
2691 pmb),
2692 lpfc_sli_config_mbox_opcode_get(phba,
2693 pmb),
2694 pmbox->mbxStatus,
2695 pmbox->un.varWords[0],
2696 pmb->vport->port_state);
2697 pmbox->mbxStatus = 0;
2698 pmbox->mbxOwner = OWN_HOST;
2699 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2700 if (rc != MBX_NOT_FINISHED)
2701 continue;
2702 }
2703 }
2705 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2706 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2707 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2708 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2710 pmb->vport ? pmb->vport->vpi : 0,
2712 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2713 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2715 *((uint32_t *) pmbox),
2716 pmbox->un.varWords[0],
2717 pmbox->un.varWords[1],
2718 pmbox->un.varWords[2],
2719 pmbox->un.varWords[3],
2720 pmbox->un.varWords[4],
2721 pmbox->un.varWords[5],
2722 pmbox->un.varWords[6],
2723 pmbox->un.varWords[7],
2724 pmbox->un.varWords[8],
2725 pmbox->un.varWords[9],
2726 pmbox->un.varWords[10]);
2728 if (pmb->mbox_cmpl)
2729 pmb->mbox_cmpl(phba, pmb);
2730 } while (1);
2732 return 0;
2733 }
2735 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2736 * @phba: Pointer to HBA context object.
2737 * @pring: Pointer to driver SLI ring object.
2738 * @tag: Buffer tag.
2740 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
2741 * is set in the tag the buffer is posted for a particular exchange,
2742 * the function will return the buffer without replacing the buffer.
2743 * If the buffer is for unsolicited ELS or CT traffic, this function
2744 * returns the buffer and also posts another buffer to the firmware.
2746 static struct lpfc_dmabuf *
2747 lpfc_sli_get_buff(struct lpfc_hba *phba,
2748 struct lpfc_sli_ring *pring,
2749 uint32_t tag)
2750 {
2751 struct hbq_dmabuf *hbq_entry;
2753 if (tag & QUE_BUFTAG_BIT)
2754 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2755 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2756 if (!hbq_entry)
2757 return NULL;
2758 return &hbq_entry->dbuf;
2759 }
2762 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2763 * @phba: Pointer to HBA context object.
2764 * @pring: Pointer to driver SLI ring object.
2765 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2766 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2767 * @fch_type: the type for the first frame of the sequence.
2769 * This function is called with no lock held. This function uses the r_ctl and
2770 * type of the received sequence to find the correct callback function to call
2771 * to process the sequence.
2774 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2775 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2776 uint32_t fch_type)
2777 {
2778 int i;
2780 switch (fch_type) {
2781 case FC_TYPE_NVME:
2782 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2783 return 1;
2784 default:
2785 break;
2786 }
2788 /* unSolicited Responses */
2789 if (pring->prt[0].profile) {
2790 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2791 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2792 saveq);
2793 return 1;
2794 }
2795 /* We must search, based on rctl / type
2796 for the right routine */
2797 for (i = 0; i < pring->num_mask; i++) {
2798 if ((pring->prt[i].rctl == fch_r_ctl) &&
2799 (pring->prt[i].type == fch_type)) {
2800 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2801 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2802 (phba, pring, saveq);
2803 return 1;
2804 }
2805 }
2807 return 0;
2808 }
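/*
 * Illustrative sketch (added commentary, not driver code): the per-ring
 * prt[] table searched above is populated at ring-setup time; an entry
 * of the following shape (values are examples only) would route
 * unsolicited CT frames to the CT handler:
 *
 *	pring->prt[0].rctl = FC_RCTL_DD_UNSOL_CTL;
 *	pring->prt[0].type = FC_TYPE_CT;
 *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_ct_unsol_event;
 */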
2810 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2811 * @phba: Pointer to HBA context object.
2812 * @pring: Pointer to driver SLI ring object.
2813 * @saveq: Pointer to the unsolicited iocb.
2815 * This function is called with no lock held by the ring event handler
2816 * when there is an unsolicited iocb posted to the response ring by the
2817 * firmware. This function gets the buffer associated with the iocbs
2818 * and calls the event handler for the ring. This function handles both
2819 * qring buffers and hbq buffers.
2820 * When the function returns 1, the caller can free the iocb object;
2821 * otherwise the upper-layer functions will free the iocb objects.
2824 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2825 struct lpfc_iocbq *saveq)
2829 uint32_t Rctl, Type;
2830 struct lpfc_iocbq *iocbq;
2831 struct lpfc_dmabuf *dmzbuf;
2833 irsp = &(saveq->iocb);
2835 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2836 if (pring->lpfc_sli_rcv_async_status)
2837 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2838 else
2839 lpfc_printf_log(phba,
2842 "0316 Ring %d handler: unexpected "
2843 "ASYNC_STATUS iocb received evt_code "
2846 irsp->un.asyncstat.evt_code);
2850 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2851 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2852 if (irsp->ulpBdeCount > 0) {
2853 dmzbuf = lpfc_sli_get_buff(phba, pring,
2854 irsp->un.ulpWord[3]);
2855 lpfc_in_buf_free(phba, dmzbuf);
2858 if (irsp->ulpBdeCount > 1) {
2859 dmzbuf = lpfc_sli_get_buff(phba, pring,
2860 irsp->unsli3.sli3Words[3]);
2861 lpfc_in_buf_free(phba, dmzbuf);
2864 if (irsp->ulpBdeCount > 2) {
2865 dmzbuf = lpfc_sli_get_buff(phba, pring,
2866 irsp->unsli3.sli3Words[7]);
2867 lpfc_in_buf_free(phba, dmzbuf);
2868 }
2870 return 1;
2871 }
2873 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2874 if (irsp->ulpBdeCount != 0) {
2875 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2876 irsp->un.ulpWord[3]);
2877 if (!saveq->context2)
2878 lpfc_printf_log(phba,
2879 KERN_ERR,
2880 LOG_SLI,
2881 "0341 Ring %d Cannot find buffer for "
2882 "an unsolicited iocb. tag 0x%x\n",
2883 pring->ringno,
2884 irsp->un.ulpWord[3]);
2885 }
2886 if (irsp->ulpBdeCount == 2) {
2887 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2888 irsp->unsli3.sli3Words[7]);
2889 if (!saveq->context3)
2890 lpfc_printf_log(phba,
2891 KERN_ERR,
2892 LOG_SLI,
2893 "0342 Ring %d Cannot find buffer for an"
2894 " unsolicited iocb. tag 0x%x\n",
2895 pring->ringno,
2896 irsp->unsli3.sli3Words[7]);
2897 }
2898 list_for_each_entry(iocbq, &saveq->list, list) {
2899 irsp = &(iocbq->iocb);
2900 if (irsp->ulpBdeCount != 0) {
2901 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2902 irsp->un.ulpWord[3]);
2903 if (!iocbq->context2)
2904 lpfc_printf_log(phba,
2905 KERN_ERR,
2906 LOG_SLI,
2907 "0343 Ring %d Cannot find "
2908 "buffer for an unsolicited iocb"
2909 ". tag 0x%x\n", pring->ringno,
2910 irsp->un.ulpWord[3]);
2912 if (irsp->ulpBdeCount == 2) {
2913 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2914 irsp->unsli3.sli3Words[7]);
2915 if (!iocbq->context3)
2916 lpfc_printf_log(phba,
2917 KERN_ERR,
2918 LOG_SLI,
2919 "0344 Ring %d Cannot find "
2920 "buffer for an unsolicited "
2921 "iocb. tag 0x%x\n",
2922 pring->ringno,
2923 irsp->unsli3.sli3Words[7]);
2924 }
2925 }
2926 }
2927 if (irsp->ulpBdeCount != 0 &&
2928 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2929 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2930 int found = 0;
2932 /* search continue save q for same XRI */
2933 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2934 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2935 saveq->iocb.unsli3.rcvsli3.ox_id) {
2936 list_add_tail(&saveq->list, &iocbq->list);
2937 found = 1;
2938 break;
2939 }
2940 }
2941 if (!found)
2942 list_add_tail(&saveq->clist,
2943 &pring->iocb_continue_saveq);
2944 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2945 list_del_init(&iocbq->clist);
2946 saveq = iocbq;
2947 irsp = &(saveq->iocb);
2948 } else
2949 return 0;
2950 }
2951 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2952 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2953 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2954 Rctl = FC_RCTL_ELS_REQ;
2955 Type = FC_TYPE_ELS;
2956 } else {
2957 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2958 Rctl = w5p->hcsw.Rctl;
2959 Type = w5p->hcsw.Type;
2961 /* Firmware Workaround */
2962 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2963 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2964 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2965 Rctl = FC_RCTL_ELS_REQ;
2966 Type = FC_TYPE_ELS;
2967 w5p->hcsw.Rctl = Rctl;
2968 w5p->hcsw.Type = Type;
2969 }
2970 }
2972 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2973 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2974 "0313 Ring %d handler: unexpected Rctl x%x "
2975 "Type x%x received\n",
2976 pring->ringno, Rctl, Type);
2978 return 1;
2979 }
2982 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2983 * @phba: Pointer to HBA context object.
2984 * @pring: Pointer to driver SLI ring object.
2985 * @prspiocb: Pointer to response iocb object.
2987 * This function looks up the iocb_lookup table to get the command iocb
2988 * corresponding to the given response iocb using the iotag of the
2989 * response iocb. This function is called with the hbalock held
2990 * for sli3 devices or the ring_lock for sli4 devices.
2991 * This function returns the command iocb object if it finds the command
2992 * iocb else returns NULL.
2994 static struct lpfc_iocbq *
2995 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2996 struct lpfc_sli_ring *pring,
2997 struct lpfc_iocbq *prspiocb)
2999 struct lpfc_iocbq *cmd_iocb = NULL;
3000 uint16_t iotag;
3001 lockdep_assert_held(&phba->hbalock);
3003 iotag = prspiocb->iocb.ulpIoTag;
3005 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3006 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3007 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3008 /* remove from txcmpl queue list */
3009 list_del_init(&cmd_iocb->list);
3010 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3011 return cmd_iocb;
3012 }
3013 }
3015 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3016 "0317 iotag x%x is out of "
3017 "range: max iotag x%x wd0 x%x\n",
3018 iotag, phba->sli.last_iotag,
3019 *(((uint32_t *) &prspiocb->iocb) + 7));
3021 return NULL;
3022 }
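/*
 * Illustrative sketch (added commentary, not driver code): iocbq_lookup
 * is a flat array indexed by iotag, so matching a response to its
 * command costs a single array access rather than a list search:
 *
 *	if (iotag != 0 && iotag <= phba->sli.last_iotag)
 *		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 *
 * iotag 0 is never a valid lookup key; lpfc_sli_submit_iocb() above
 * assigns tag 0 precisely to iocbs that expect no response completion.
 */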
3024 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3025 * @phba: Pointer to HBA context object.
3026 * @pring: Pointer to driver SLI ring object.
3027 * @iotag: IOCB tag.
3029 * This function looks up the iocb_lookup table to get the command iocb
3030 * corresponding to the given iotag. This function is called with the
3031 * hbalock held.
3032 * This function returns the command iocb object if it finds the command
3033 * iocb else returns NULL.
3035 static struct lpfc_iocbq *
3036 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3037 struct lpfc_sli_ring *pring, uint16_t iotag)
3039 struct lpfc_iocbq *cmd_iocb = NULL;
3041 lockdep_assert_held(&phba->hbalock);
3042 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3043 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3044 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3045 /* remove from txcmpl queue list */
3046 list_del_init(&cmd_iocb->list);
3047 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3048 return cmd_iocb;
3049 }
3050 }
3052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3053 "0372 iotag x%x lookup error: max iotag (x%x) "
3055 iotag, phba->sli.last_iotag,
3056 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3057 return NULL;
3058 }
3061 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3062 * @phba: Pointer to HBA context object.
3063 * @pring: Pointer to driver SLI ring object.
3064 * @saveq: Pointer to the response iocb to be processed.
3066 * This function is called by the ring event handler for non-fcp
3067 * rings when there is a new response iocb in the response ring.
3068 * The caller is not required to hold any locks. This function
3069 * gets the command iocb associated with the response iocb and
3070 * calls the completion handler for the command iocb. If there
3071 * is no completion handler, the function will free the resources
3072 * associated with command iocb. If the response iocb is for
3073 * an already aborted command iocb, the status of the completion
3074 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3075 * This function always returns 1.
3078 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3079 struct lpfc_iocbq *saveq)
3081 struct lpfc_iocbq *cmdiocbp;
3082 int rc = 1;
3083 unsigned long iflag;
3085 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
3086 if (phba->sli_rev == LPFC_SLI_REV4)
3087 spin_lock_irqsave(&pring->ring_lock, iflag);
3088 else
3089 spin_lock_irqsave(&phba->hbalock, iflag);
3090 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3091 if (phba->sli_rev == LPFC_SLI_REV4)
3092 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3093 else
3094 spin_unlock_irqrestore(&phba->hbalock, iflag);
3096 if (cmdiocbp) {
3097 if (cmdiocbp->iocb_cmpl) {
3099 * If an ELS command failed send an event to mgmt
3102 if (saveq->iocb.ulpStatus &&
3103 (pring->ringno == LPFC_ELS_RING) &&
3104 (cmdiocbp->iocb.ulpCommand ==
3105 CMD_ELS_REQUEST64_CR))
3106 lpfc_send_els_failure_event(phba,
3107 cmdiocbp, saveq);
3110 * Post all ELS completions to the worker thread.
3111 * All other are passed to the completion callback.
3113 if (pring->ringno == LPFC_ELS_RING) {
3114 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3115 (cmdiocbp->iocb_flag &
3116 LPFC_DRIVER_ABORTED)) {
3117 spin_lock_irqsave(&phba->hbalock,
3118 iflag);
3119 cmdiocbp->iocb_flag &=
3120 ~LPFC_DRIVER_ABORTED;
3121 spin_unlock_irqrestore(&phba->hbalock,
3122 iflag);
3123 saveq->iocb.ulpStatus =
3124 IOSTAT_LOCAL_REJECT;
3125 saveq->iocb.un.ulpWord[4] =
3126 IOERR_SLI_ABORTED;
3128 /* Firmware could still be in progress
3129 * of DMAing payload, so don't free data
3130 * buffer till after a hbeat.
3132 spin_lock_irqsave(&phba->hbalock,
3133 iflag);
3134 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3135 spin_unlock_irqrestore(&phba->hbalock,
3136 iflag);
3137 }
3138 if (phba->sli_rev == LPFC_SLI_REV4) {
3139 if (saveq->iocb_flag &
3140 LPFC_EXCHANGE_BUSY) {
3141 /* Set cmdiocb flag for the
3142 * exchange busy so sgl (xri)
3143 * will not be released until
3144 * the abort xri is received
3145 * from hba.
3146 */
3147 spin_lock_irqsave(
3148 &phba->hbalock, iflag);
3149 cmdiocbp->iocb_flag |=
3150 LPFC_EXCHANGE_BUSY;
3151 spin_unlock_irqrestore(
3152 &phba->hbalock, iflag);
3154 if (cmdiocbp->iocb_flag &
3155 LPFC_DRIVER_ABORTED) {
3156 /*
3157 * Clear LPFC_DRIVER_ABORTED
3158 * bit in case it was driver
3159 * initiated abort.
3160 */
3161 spin_lock_irqsave(
3162 &phba->hbalock, iflag);
3163 cmdiocbp->iocb_flag &=
3164 ~LPFC_DRIVER_ABORTED;
3165 spin_unlock_irqrestore(
3166 &phba->hbalock, iflag);
3167 cmdiocbp->iocb.ulpStatus =
3168 IOSTAT_LOCAL_REJECT;
3169 cmdiocbp->iocb.un.ulpWord[4] =
3170 IOERR_ABORT_REQUESTED;
3171 /*
3172 * For SLI4, the response iocb contains
3173 * NO_XRI in sli_xritag, it
3174 * shall not affect releasing
3175 * sgl (xri) process.
3176 */
3177 saveq->iocb.ulpStatus =
3178 IOSTAT_LOCAL_REJECT;
3179 saveq->iocb.un.ulpWord[4] =
3180 IOERR_SLI_ABORTED;
3181 spin_lock_irqsave(
3182 &phba->hbalock, iflag);
3183 saveq->iocb_flag |=
3184 LPFC_DELAY_MEM_FREE;
3185 spin_unlock_irqrestore(
3186 &phba->hbalock, iflag);
3187 }
3188 }
3189 }
3190 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3191 } else
3192 lpfc_sli_release_iocbq(phba, cmdiocbp);
3193 } else {
3194 /*
3195 * Unknown initiating command based on the response iotag.
3196 * This could be the case on the ELS ring because of
3197 * lpfc_els_abort().
3198 */
3199 if (pring->ringno != LPFC_ELS_RING) {
3201 * Ring <ringno> handler: unexpected completion IoTag
3204 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3205 "0322 Ring %d handler: "
3206 "unexpected completion IoTag x%x "
3207 "Data: x%x x%x x%x x%x\n",
3209 saveq->iocb.ulpIoTag,
3210 saveq->iocb.ulpStatus,
3211 saveq->iocb.un.ulpWord[4],
3212 saveq->iocb.ulpCommand,
3213 saveq->iocb.ulpContext);
3214 }
3215 }
3217 return rc;
3218 }
3221 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3222 * @phba: Pointer to HBA context object.
3223 * @pring: Pointer to driver SLI ring object.
3225 * This function is called from the iocb ring event handlers when
3226 * put pointer is ahead of the get pointer for a ring. This function signals
3227 * an error attention condition to the worker thread and the worker
3228 * thread will transition the HBA to offline state.
3231 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3233 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3235 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3236 * rsp ring <portRspMax>
3238 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3239 "0312 Ring %d handler: portRspPut %d "
3240 "is bigger than rsp ring %d\n",
3241 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3242 pring->sli.sli3.numRiocb);
3244 phba->link_state = LPFC_HBA_ERROR;
3247 * All error attention handlers are posted to
3248 * the worker thread.
3249 */
3250 phba->work_ha |= HA_ERATT;
3251 phba->work_hs = HS_FFER3;
3253 lpfc_worker_wake_up(phba);
3259 * lpfc_poll_eratt - Error attention polling timer timeout handler
3260 * @t: Pointer to the timer list entry embedded in the HBA context object.
3262 * This function is invoked by the Error Attention polling timer when the
3263 * timer times out. It will check the SLI Error Attention register for
3264 * possible attention events. If so, it will post an Error Attention event
3265 * and wake up worker thread to process it. Otherwise, it will set up the
3266 * Error Attention polling timer for the next poll.
3268 void lpfc_poll_eratt(struct timer_list *t)
3270 struct lpfc_hba *phba;
3271 uint32_t eratt = 0;
3272 uint64_t sli_intr, cnt;
3274 phba = from_timer(phba, t, eratt_poll);
3276 /* Here we will also keep track of interrupts per sec of the hba */
3277 sli_intr = phba->sli.slistat.sli_intr;
3279 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3280 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3281 sli_intr);
3282 else
3283 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3285 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3286 do_div(cnt, phba->eratt_poll_interval);
3287 phba->sli.slistat.sli_ips = cnt;
3289 phba->sli.slistat.sli_prev_intr = sli_intr;
3291 /* Check chip HA register for error event */
3292 eratt = lpfc_sli_check_eratt(phba);
3293 if (eratt)
3295 /* Tell the worker thread there is work to do */
3296 lpfc_worker_wake_up(phba);
3297 else
3298 /* Restart the timer for next eratt poll */
3299 mod_timer(&phba->eratt_poll,
3300 jiffies +
3301 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3303 return;
3304 }
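/*
 * Illustrative sketch (added commentary, not driver code): do_div()
 * divides a 64-bit value in place and returns the 32-bit remainder,
 * which is why cnt holds the quotient after the call above:
 *
 *	u64 n = 10000;
 *	u32 rem = do_div(n, 3);	// now n == 3333 and rem == 1
 */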
3307 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3308 * @phba: Pointer to HBA context object.
3309 * @pring: Pointer to driver SLI ring object.
3310 * @mask: Host attention register mask for this ring.
3312 * This function is called from the interrupt context when there is a ring
3313 * event for the fcp ring. The caller does not hold any lock.
3314 * The function processes each response iocb in the response ring until it
3315 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3316 * LE bit set. The function will call the completion handler of the command iocb
3317 * if the response iocb indicates a completion for a command iocb or it is
3318 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3319 * function if this is an unsolicited iocb.
3320 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3321 * to check it explicitly.
3324 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3325 struct lpfc_sli_ring *pring, uint32_t mask)
3327 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3328 IOCB_t *irsp = NULL;
3329 IOCB_t *entry = NULL;
3330 struct lpfc_iocbq *cmdiocbq = NULL;
3331 struct lpfc_iocbq rspiocbq;
3332 uint32_t status;
3333 uint32_t portRspPut, portRspMax;
3334 int rc = 1;
3335 lpfc_iocb_type type;
3336 unsigned long iflag;
3337 uint32_t rsp_cmpl = 0;
3339 spin_lock_irqsave(&phba->hbalock, iflag);
3340 pring->stats.iocb_event++;
3343 * The next available response entry should never exceed the maximum
3344 * entries. If it does, treat it as an adapter hardware error.
3346 portRspMax = pring->sli.sli3.numRiocb;
3347 portRspPut = le32_to_cpu(pgp->rspPutInx);
3348 if (unlikely(portRspPut >= portRspMax)) {
3349 lpfc_sli_rsp_pointers_error(phba, pring);
3350 spin_unlock_irqrestore(&phba->hbalock, iflag);
3351 return 1;
3352 }
3353 if (phba->fcp_ring_in_use) {
3354 spin_unlock_irqrestore(&phba->hbalock, iflag);
3355 return 1;
3356 } else
3357 phba->fcp_ring_in_use = 1;
3360 while (pring->sli.sli3.rspidx != portRspPut) {
3362 * Fetch an entry off the ring and copy it into a local data
3363 * structure. The copy involves a byte-swap since the
3364 * network byte order and pci byte orders are different.
3366 entry = lpfc_resp_iocb(phba, pring);
3367 phba->last_completion_time = jiffies;
3369 if (++pring->sli.sli3.rspidx >= portRspMax)
3370 pring->sli.sli3.rspidx = 0;
3372 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3373 (uint32_t *) &rspiocbq.iocb,
3374 phba->iocb_rsp_size);
3375 INIT_LIST_HEAD(&(rspiocbq.list));
3376 irsp = &rspiocbq.iocb;
3378 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3379 pring->stats.iocb_rsp++;
3380 rsp_cmpl++;
3382 if (unlikely(irsp->ulpStatus)) {
3384 * If resource errors reported from HBA, reduce
3385 * queuedepths of the SCSI device.
3387 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3388 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3389 IOERR_NO_RESOURCES)) {
3390 spin_unlock_irqrestore(&phba->hbalock, iflag);
3391 phba->lpfc_rampdown_queue_depth(phba);
3392 spin_lock_irqsave(&phba->hbalock, iflag);
3395 /* Rsp ring <ringno> error: IOCB */
3396 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3397 "0336 Rsp Ring %d error: IOCB Data: "
3398 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3400 irsp->un.ulpWord[0],
3401 irsp->un.ulpWord[1],
3402 irsp->un.ulpWord[2],
3403 irsp->un.ulpWord[3],
3404 irsp->un.ulpWord[4],
3405 irsp->un.ulpWord[5],
3406 *(uint32_t *)&irsp->un1,
3407 *((uint32_t *)&irsp->un1 + 1));
3408 }
3410 switch (type) {
3411 case LPFC_ABORT_IOCB:
3412 case LPFC_SOL_IOCB:
3413 /*
3414 * Idle exchange closed via ABTS from port. No iocb
3415 * resources need to be recovered.
3417 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3419 "0333 IOCB cmd 0x%x"
3420 " processed. Skipping"
3426 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3427 &rspiocbq);
3428 if (unlikely(!cmdiocbq))
3429 break;
3430 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3431 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3432 if (cmdiocbq->iocb_cmpl) {
3433 spin_unlock_irqrestore(&phba->hbalock, iflag);
3434 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3435 &rspiocbq);
3436 spin_lock_irqsave(&phba->hbalock, iflag);
3437 }
3438 break;
3439 case LPFC_UNSOL_IOCB:
3440 spin_unlock_irqrestore(&phba->hbalock, iflag);
3441 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3442 spin_lock_irqsave(&phba->hbalock, iflag);
3443 break;
3444 default:
3445 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3446 char adaptermsg[LPFC_MAX_ADPTMSG];
3447 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3448 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3449 MAX_MSG_DATA);
3450 dev_warn(&((phba->pcidev)->dev),
3451 "lpfc%d: %s\n",
3452 phba->brd_no, adaptermsg);
3453 } else {
3454 /* Unknown IOCB command */
3455 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3456 "0334 Unknown IOCB command "
3457 "Data: x%x, x%x x%x x%x x%x\n",
3458 type, irsp->ulpCommand,
3459 irsp->ulpStatus,
3460 irsp->ulpIoTag,
3461 irsp->ulpContext);
3462 }
3463 break;
3464 }
3467 * The response IOCB has been processed. Update the ring
3468 * pointer in SLIM. If the port response put pointer has not
3469 * been updated, sync the pgp->rspPutInx and fetch the new port
3470 * response put pointer.
3472 writel(pring->sli.sli3.rspidx,
3473 &phba->host_gp[pring->ringno].rspGetInx);
3475 if (pring->sli.sli3.rspidx == portRspPut)
3476 portRspPut = le32_to_cpu(pgp->rspPutInx);
3479 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3480 pring->stats.iocb_rsp_full++;
3481 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3482 writel(status, phba->CAregaddr);
3483 readl(phba->CAregaddr);
3485 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3486 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3487 pring->stats.iocb_cmd_empty++;
3489 /* Force update of the local copy of cmdGetInx */
3490 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3491 lpfc_sli_resume_iocb(phba, pring);
3493 if ((pring->lpfc_sli_cmd_available))
3494 (pring->lpfc_sli_cmd_available) (phba, pring);
3498 phba->fcp_ring_in_use = 0;
3499 spin_unlock_irqrestore(&phba->hbalock, iflag);
3500 return rc;
3501 }
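/*
 * Illustrative sketch (added commentary, not driver code): every upcall
 * in the loop above (queue-depth rampdown, iocb_cmpl, unsolicited
 * handling) is bracketed by an unlock/lock pair because the callee may
 * itself take hbalock or call back into the midlayer:
 *
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 *	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &rspiocbq);  // no lock held
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 */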
3504 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3505 * @phba: Pointer to HBA context object.
3506 * @pring: Pointer to driver SLI ring object.
3507 * @rspiocbp: Pointer to driver response IOCB object.
3509 * This function is called from the worker thread when there is a slow-path
3510 * response IOCB to process. This function chains all the response iocbs until
3511 * seeing the iocb with the LE bit set. The function will call
3512 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3513 * completion of a command iocb. The function will call the
3514 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3515 * The function frees the resources or calls the completion handler if this
3516 * iocb is an abort completion. The function returns NULL when the response
3517 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3518 * this function shall chain the iocb on to the iocb_continueq and return the
3519 * response iocb passed in.
3521 static struct lpfc_iocbq *
3522 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3523 struct lpfc_iocbq *rspiocbp)
3525 struct lpfc_iocbq *saveq;
3526 struct lpfc_iocbq *cmdiocbp;
3527 struct lpfc_iocbq *next_iocb;
3528 IOCB_t *irsp = NULL;
3529 uint32_t free_saveq;
3530 uint8_t iocb_cmd_type;
3531 lpfc_iocb_type type;
3532 unsigned long iflag;
3533 int rc;
3535 spin_lock_irqsave(&phba->hbalock, iflag);
3536 /* First add the response iocb to the continueq list */
3537 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3538 pring->iocb_continueq_cnt++;
3540 /* Now, determine whether the list is completed for processing */
3541 irsp = &rspiocbp->iocb;
3542 if (irsp->ulpLe) {
3543 /*
3544 * By default, the driver expects to free all resources
3545 * associated with this iocb completion.
3546 */
3547 free_saveq = 1;
3548 saveq = list_get_first(&pring->iocb_continueq,
3549 struct lpfc_iocbq, list);
3550 irsp = &(saveq->iocb);
3551 list_del_init(&pring->iocb_continueq);
3552 pring->iocb_continueq_cnt = 0;
3554 pring->stats.iocb_rsp++;
3557 * If resource errors reported from HBA, reduce
3558 * queuedepths of the SCSI device.
3560 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3561 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3562 IOERR_NO_RESOURCES)) {
3563 spin_unlock_irqrestore(&phba->hbalock, iflag);
3564 phba->lpfc_rampdown_queue_depth(phba);
3565 spin_lock_irqsave(&phba->hbalock, iflag);
3568 if (irsp->ulpStatus) {
3569 /* Rsp ring <ringno> error: IOCB */
3570 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3571 "0328 Rsp Ring %d error: "
3576 "x%x x%x x%x x%x\n",
3578 irsp->un.ulpWord[0],
3579 irsp->un.ulpWord[1],
3580 irsp->un.ulpWord[2],
3581 irsp->un.ulpWord[3],
3582 irsp->un.ulpWord[4],
3583 irsp->un.ulpWord[5],
3584 *(((uint32_t *) irsp) + 6),
3585 *(((uint32_t *) irsp) + 7),
3586 *(((uint32_t *) irsp) + 8),
3587 *(((uint32_t *) irsp) + 9),
3588 *(((uint32_t *) irsp) + 10),
3589 *(((uint32_t *) irsp) + 11),
3590 *(((uint32_t *) irsp) + 12),
3591 *(((uint32_t *) irsp) + 13),
3592 *(((uint32_t *) irsp) + 14),
3593 *(((uint32_t *) irsp) + 15));
3597 * Fetch the IOCB command type and call the correct completion
3598 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3599 * get freed back to the lpfc_iocb_list by the discovery
3602 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3603 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3606 spin_unlock_irqrestore(&phba->hbalock, iflag);
3607 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3608 spin_lock_irqsave(&phba->hbalock, iflag);
3611 case LPFC_UNSOL_IOCB:
3612 spin_unlock_irqrestore(&phba->hbalock, iflag);
3613 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3614 spin_lock_irqsave(&phba->hbalock, iflag);
3619 case LPFC_ABORT_IOCB:
3620 cmdiocbp = NULL;
3621 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3622 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3623 saveq);
3624 if (cmdiocbp) {
3625 /* Call the specified completion routine */
3626 if (cmdiocbp->iocb_cmpl) {
3627 spin_unlock_irqrestore(&phba->hbalock,
3628 iflag);
3629 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3630 saveq);
3631 spin_lock_irqsave(&phba->hbalock,
3632 iflag);
3633 } else
3634 __lpfc_sli_release_iocbq(phba,
3635 cmdiocbp);
3636 }
3637 break;
3639 case LPFC_UNKNOWN_IOCB:
3640 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3641 char adaptermsg[LPFC_MAX_ADPTMSG];
3642 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3643 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3644 MAX_MSG_DATA);
3645 dev_warn(&((phba->pcidev)->dev),
3646 "lpfc%d: %s\n",
3647 phba->brd_no, adaptermsg);
3648 } else {
3649 /* Unknown IOCB command */
3650 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3651 "0335 Unknown IOCB "
3652 "command Data: x%x "
3662 if (free_saveq) {
3663 list_for_each_entry_safe(rspiocbp, next_iocb,
3664 &saveq->list, list) {
3665 list_del_init(&rspiocbp->list);
3666 __lpfc_sli_release_iocbq(phba, rspiocbp);
3667 }
3668 __lpfc_sli_release_iocbq(phba, saveq);
3669 }
3670 rspiocbp = NULL;
3671 }
3672 spin_unlock_irqrestore(&phba->hbalock, iflag);
3673 return rspiocbp;
3674 }
3677 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3678 * @phba: Pointer to HBA context object.
3679 * @pring: Pointer to driver SLI ring object.
3680 * @mask: Host attention register mask for this ring.
3682 * This routine wraps the actual slow_ring event process routine from the
3683 * API jump table function pointer from the lpfc_hba struct.
3686 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3687 struct lpfc_sli_ring *pring, uint32_t mask)
3689 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3693 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3694 * @phba: Pointer to HBA context object.
3695 * @pring: Pointer to driver SLI ring object.
3696 * @mask: Host attention register mask for this ring.
3698 * This function is called from the worker thread when there is a ring event
3699 * for non-fcp rings. The caller does not hold any lock. The function will
3700 * remove each response iocb in the response ring and calls the handle
3701 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3704 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3705 struct lpfc_sli_ring *pring, uint32_t mask)
3707 struct lpfc_pgp *pgp;
3708 IOCB_t *entry;
3709 IOCB_t *irsp = NULL;
3710 struct lpfc_iocbq *rspiocbp = NULL;
3711 uint32_t portRspPut, portRspMax;
3712 unsigned long iflag;
3713 uint32_t status;
3715 pgp = &phba->port_gp[pring->ringno];
3716 spin_lock_irqsave(&phba->hbalock, iflag);
3717 pring->stats.iocb_event++;
3720 * The next available response entry should never exceed the maximum
3721 * entries. If it does, treat it as an adapter hardware error.
3723 portRspMax = pring->sli.sli3.numRiocb;
3724 portRspPut = le32_to_cpu(pgp->rspPutInx);
3725 if (portRspPut >= portRspMax) {
3727 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3728 * rsp ring <portRspMax>
3730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3731 "0303 Ring %d handler: portRspPut %d "
3732 "is bigger than rsp ring %d\n",
3733 pring->ringno, portRspPut, portRspMax);
3735 phba->link_state = LPFC_HBA_ERROR;
3736 spin_unlock_irqrestore(&phba->hbalock, iflag);
3738 phba->work_hs = HS_FFER3;
3739 lpfc_handle_eratt(phba);
3740 return;
3741 }
3745 while (pring->sli.sli3.rspidx != portRspPut) {
3747 * Build a completion list and call the appropriate handler.
3748 * The process is to get the next available response iocb, get
3749 * a free iocb from the list, copy the response data into the
3750 * free iocb, insert to the continuation list, and update the
3751 * next response index to slim. This process makes response
3752 * iocb's in the ring available to DMA as fast as possible but
3753 * pays a penalty for a copy operation. Since the iocb is
3754 * only 32 bytes, this penalty is considered small relative to
3755 * the PCI reads for register values and a slim write. When
3756 * the ulpLe field is set, the entire Command has been
3757 * received.
3758 */
3759 entry = lpfc_resp_iocb(phba, pring);
3761 phba->last_completion_time = jiffies;
3762 rspiocbp = __lpfc_sli_get_iocbq(phba);
3763 if (rspiocbp == NULL) {
3764 printk(KERN_ERR "%s: out of buffers! Failing "
3765 "completion.\n", __func__);
3769 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3770 phba->iocb_rsp_size);
3771 irsp = &rspiocbp->iocb;
3773 if (++pring->sli.sli3.rspidx >= portRspMax)
3774 pring->sli.sli3.rspidx = 0;
3776 if (pring->ringno == LPFC_ELS_RING) {
3777 lpfc_debugfs_slow_ring_trc(phba,
3778 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3779 *(((uint32_t *) irsp) + 4),
3780 *(((uint32_t *) irsp) + 6),
3781 *(((uint32_t *) irsp) + 7));
3784 writel(pring->sli.sli3.rspidx,
3785 &phba->host_gp[pring->ringno].rspGetInx);
3787 spin_unlock_irqrestore(&phba->hbalock, iflag);
3788 /* Handle the response IOCB */
3789 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3790 spin_lock_irqsave(&phba->hbalock, iflag);
3793 * If the port response put pointer has not been updated, sync
3794 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
3795 * response put pointer.
3797 if (pring->sli.sli3.rspidx == portRspPut) {
3798 portRspPut = le32_to_cpu(pgp->rspPutInx);
3800 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3802 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3803 /* At least one response entry has been freed */
3804 pring->stats.iocb_rsp_full++;
3805 /* SET RxRE_RSP in Chip Att register */
3806 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3807 writel(status, phba->CAregaddr);
3808 readl(phba->CAregaddr); /* flush */
3810 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3811 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3812 pring->stats.iocb_cmd_empty++;
3814 /* Force update of the local copy of cmdGetInx */
3815 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3816 lpfc_sli_resume_iocb(phba, pring);
3818 if ((pring->lpfc_sli_cmd_available))
3819 (pring->lpfc_sli_cmd_available) (phba, pring);
3823 spin_unlock_irqrestore(&phba->hbalock, iflag);
3828 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3829 * @phba: Pointer to HBA context object.
3830 * @pring: Pointer to driver SLI ring object.
3831 * @mask: Host attention register mask for this ring.
3833 * This function is called from the worker thread when there is a pending
3834 * ELS response iocb on the driver internal slow-path response iocb worker
3835 * queue. The caller does not hold any lock. The function will remove each
3836 * response iocb from the response worker queue and calls the handle
3837 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3840 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3841 struct lpfc_sli_ring *pring, uint32_t mask)
3843 struct lpfc_iocbq *irspiocbq;
3844 struct hbq_dmabuf *dmabuf;
3845 struct lpfc_cq_event *cq_event;
3846 unsigned long iflag;
3847 int count = 0;
3849 spin_lock_irqsave(&phba->hbalock, iflag);
3850 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3851 spin_unlock_irqrestore(&phba->hbalock, iflag);
3852 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3853 /* Get the response iocb from the head of work queue */
3854 spin_lock_irqsave(&phba->hbalock, iflag);
3855 list_remove_head(&phba->sli4_hba.sp_queue_event,
3856 cq_event, struct lpfc_cq_event, list);
3857 spin_unlock_irqrestore(&phba->hbalock, iflag);
3859 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3860 case CQE_CODE_COMPL_WQE:
3861 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3863 /* Translate ELS WCQE to response IOCBQ */
3864 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3865 irspiocbq);
3866 if (irspiocbq)
3867 lpfc_sli_sp_handle_rspiocb(phba, pring,
3868 irspiocbq);
3869 count++;
3870 break;
3871 case CQE_CODE_RECEIVE:
3872 case CQE_CODE_RECEIVE_V1:
3873 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3874 cq_event);
3875 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3876 count++;
3877 break;
3878 default:
3879 break;
3880 }
3882 /* Limit the number of events to 64 to avoid soft lockups */
3883 if (count == 64)
3884 break;
3885 }
3886 }
3889 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3890 * @phba: Pointer to HBA context object.
3891 * @pring: Pointer to driver SLI ring object.
3893 * This function aborts all iocbs in the given ring and frees all the iocb
3894 * objects in txq. This function issues an abort iocb for all the iocb commands
3895 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3896 * the return of this function. The caller is not required to hold any locks.
3899 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3901 LIST_HEAD(completions);
3902 struct lpfc_iocbq *iocb, *next_iocb;
3904 if (pring->ringno == LPFC_ELS_RING) {
3905 lpfc_fabric_abort_hba(phba);
3908 /* Error everything on txq and txcmplq.
3909 * First do the txq.
3910 */
3911 if (phba->sli_rev >= LPFC_SLI_REV4) {
3912 spin_lock_irq(&pring->ring_lock);
3913 list_splice_init(&pring->txq, &completions);
3914 pring->txq_cnt = 0;
3915 spin_unlock_irq(&pring->ring_lock);
3917 spin_lock_irq(&phba->hbalock);
3918 /* Next issue ABTS for everything on the txcmplq */
3919 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3920 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3921 spin_unlock_irq(&phba->hbalock);
3923 spin_lock_irq(&phba->hbalock);
3924 list_splice_init(&pring->txq, &completions);
3925 pring->txq_cnt = 0;
3927 /* Next issue ABTS for everything on the txcmplq */
3928 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3929 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3930 spin_unlock_irq(&phba->hbalock);
3933 /* Cancel all the IOCBs from the completions list */
3934 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3939 * lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
3940 * @phba: Pointer to HBA context object.
3941 * @pring: Pointer to driver SLI ring object.
3943 * This function aborts all iocbs in the given ring and frees all the iocb
3944 * objects in txq. This function issues an abort iocb for all the iocb commands
3945 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3946 * the return of this function. The caller is not required to hold any locks.
3949 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3951 LIST_HEAD(completions);
3952 struct lpfc_iocbq *iocb, *next_iocb;
3954 if (pring->ringno == LPFC_ELS_RING)
3955 lpfc_fabric_abort_hba(phba);
3957 spin_lock_irq(&phba->hbalock);
3958 /* Next issue ABTS for everything on the txcmplq */
3959 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3960 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3961 spin_unlock_irq(&phba->hbalock);
3966 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3967 * @phba: Pointer to HBA context object.
3970 * This function aborts all iocbs in FCP rings and frees all the iocb
3971 * objects in txq. This function issues an abort iocb for all the iocb commands
3972 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3973 * the return of this function. The caller is not required to hold any locks.
3976 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3978 struct lpfc_sli *psli = &phba->sli;
3979 struct lpfc_sli_ring *pring;
3982 /* Look on all the FCP Rings for the iotag */
3983 if (phba->sli_rev >= LPFC_SLI_REV4) {
3984 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3985 pring = phba->sli4_hba.fcp_wq[i]->pring;
3986 lpfc_sli_abort_iocb_ring(phba, pring);
3989 pring = &psli->sli3_ring[LPFC_FCP_RING];
3990 lpfc_sli_abort_iocb_ring(phba, pring);
3995 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3996 * @phba: Pointer to HBA context object.
3998 * This function aborts all wqes in NVME rings. This function issues an
3999 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
4000 * the txcmplq are not guaranteed to complete before the return of this
4001 * function. The caller is not required to hold any locks.
4004 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
4006 struct lpfc_sli_ring *pring;
4009 if (phba->sli_rev < LPFC_SLI_REV4)
4012 /* Abort all IO on each NVME ring. */
4013 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4014 pring = phba->sli4_hba.nvme_wq[i]->pring;
4015 lpfc_sli_abort_wqe_ring(phba, pring);
4021 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
4022 * @phba: Pointer to HBA context object.
4024 * This function flushes all iocbs in the fcp ring and frees all the iocb
4025 * objects in txq and txcmplq. This function will not issue abort iocbs
4026 * for all the iocb commands in txcmplq; they will just be returned with
4027 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4028 * slot has been permanently disabled.
4031 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4035 struct lpfc_sli *psli = &phba->sli;
4036 struct lpfc_sli_ring *pring;
4038 struct lpfc_iocbq *piocb, *next_iocb;
4040 spin_lock_irq(&phba->hbalock);
4041 /* Indicate the I/O queues are flushed */
4042 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
4043 spin_unlock_irq(&phba->hbalock);
4045 /* Look on all the FCP Rings for the iotag */
4046 if (phba->sli_rev >= LPFC_SLI_REV4) {
4047 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
4048 pring = phba->sli4_hba.fcp_wq[i]->pring;
4050 spin_lock_irq(&pring->ring_lock);
4051 /* Retrieve everything on txq */
4052 list_splice_init(&pring->txq, &txq);
4053 list_for_each_entry_safe(piocb, next_iocb,
4054 &pring->txcmplq, list)
4055 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4056 /* Retrieve everything on the txcmplq */
4057 list_splice_init(&pring->txcmplq, &txcmplq);
4059 pring->txcmplq_cnt = 0;
4060 spin_unlock_irq(&pring->ring_lock);
4063 lpfc_sli_cancel_iocbs(phba, &txq,
4064 IOSTAT_LOCAL_REJECT,
4066 /* Flush the txcmplq */
4067 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4068 IOSTAT_LOCAL_REJECT,
4072 pring = &psli->sli3_ring[LPFC_FCP_RING];
4074 spin_lock_irq(&phba->hbalock);
4075 /* Retrieve everything on txq */
4076 list_splice_init(&pring->txq, &txq);
4077 list_for_each_entry_safe(piocb, next_iocb,
4078 &pring->txcmplq, list)
4079 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4080 /* Retrieve everything on the txcmplq */
4081 list_splice_init(&pring->txcmplq, &txcmplq);
4083 pring->txcmplq_cnt = 0;
4084 spin_unlock_irq(&phba->hbalock);
4087 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4089 /* Flush the txcmplq */
4090 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
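/*
 * Illustrative sketch, an assumption rather than a quote of the
 * helper: a cancel routine such as lpfc_sli_cancel_iocbs() is
 * expected to complete each spliced-off iocb locally with the
 * supplied status and reason, generating no new adapter traffic:
 *
 *	struct lpfc_iocbq *piocb, *next;
 *
 *	list_for_each_entry_safe(piocb, next, iocblist, list) {
 *		list_del_init(&piocb->list);
 *		if (piocb->iocb_cmpl) {
 *			piocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
 *			piocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
 *			(piocb->iocb_cmpl)(phba, piocb, piocb);
 *		} else {
 *			lpfc_sli_release_iocbq(phba, piocb);
 *		}
 *	}
 *
 * The I/Os never touch the (permanently disabled) hardware again.
 */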
4096 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4097 * @phba: Pointer to HBA context object.
4099 * This function flushes all wqes in the nvme rings and frees all resources
4100 * in the txcmplq. This function does not issue abort wqes for the IO
4101 * commands in txcmplq; they will just be returned with
4102 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4103 * slot has been permanently disabled.
4106 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4109 struct lpfc_sli_ring *pring;
4111 struct lpfc_iocbq *piocb, *next_iocb;
4113 if (phba->sli_rev < LPFC_SLI_REV4)
4116 /* Hint to other driver operations that a flush is in progress. */
4117 spin_lock_irq(&phba->hbalock);
4118 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4119 spin_unlock_irq(&phba->hbalock);
4121 /* Cycle through all NVME rings and complete each IO with
4122 * a local driver reason code. This is a flush so no
4123 * abort exchange to FW.
4125 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4126 pring = phba->sli4_hba.nvme_wq[i]->pring;
4128 spin_lock_irq(&pring->ring_lock);
4129 list_for_each_entry_safe(piocb, next_iocb,
4130 &pring->txcmplq, list)
4131 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4132 /* Retrieve everything on the txcmplq */
4133 list_splice_init(&pring->txcmplq, &txcmplq);
4134 pring->txcmplq_cnt = 0;
4135 spin_unlock_irq(&pring->ring_lock);
4137 /* Flush the txcmplq */
4138 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4139 IOSTAT_LOCAL_REJECT,
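/*
 * Illustrative sketch, an assumed caller-side policy: once
 * HBA_NVME_IOQ_FLUSH is set, a submission path can fail fast
 * instead of queueing work the flush would immediately cancel:
 *
 *	unsigned long iflag;
 *
 *	spin_lock_irqsave(&phba->hbalock, iflag);
 *	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
 *		spin_unlock_irqrestore(&phba->hbalock, iflag);
 *		return -ENODEV;		// hypothetical error policy
 *	}
 *	spin_unlock_irqrestore(&phba->hbalock, iflag);
 */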
4145 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4146 * @phba: Pointer to HBA context object.
4147 * @mask: Bit mask to be checked.
4149 * This function reads the host status register and compares
4150 * with the provided bit mask to check if HBA completed
4151 * the restart. This function will wait in a loop for the
4152 * HBA to complete restart. If the HBA does not restart within
4153 * 15 iterations, the function will reset the HBA again. The
4154 * function returns 1 when the HBA fails to restart; otherwise it returns zero.
4158 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4164 /* Read the HBA Host Status Register */
4165 if (lpfc_readl(phba->HSregaddr, &status))
4169 * Check status register every 100ms for 5 retries, then every
4170 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4171 * every 2.5 sec for 4.
4172 * Break out of the loop if errors occurred during init.
4174 while (((status & mask) != mask) &&
4175 !(status & HS_FFERM) &&
4187 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4188 lpfc_sli_brdrestart(phba);
4190 /* Read the HBA Host Status Register */
4191 if (lpfc_readl(phba->HSregaddr, &status)) {
4197 /* Check to see if any errors occurred during init */
4198 if ((status & HS_FFERM) || (i >= 20)) {
4199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4200 "2751 Adapter failed to restart, "
4201 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4203 readl(phba->MBslimaddr + 0xa8),
4204 readl(phba->MBslimaddr + 0xac));
4205 phba->link_state = LPFC_HBA_ERROR;
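/*
 * Illustrative sketch of the retry cadence described above; the
 * thresholds below are assumptions, only the schedule in the comment
 * is authoritative. The delay is selected from the loop counter i:
 *
 *	if (i <= 5)
 *		msleep(100);	// 5 retries at 100ms
 *	else if (i <= 10)
 *		msleep(500);	// 5 retries at 500ms
 *	else
 *		msleep(2500);	// remaining retries at 2.5 sec
 *	if (i == 15) {		// reset once, then keep checking
 *		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 *		lpfc_sli_brdrestart(phba);
 *	}
 */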
4213 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4214 * @phba: Pointer to HBA context object.
4215 * @mask: Bit mask to be checked.
4217 * This function checks the host status register to see if the HBA is
4218 * ready. This function will wait in a loop for the HBA to become ready.
4219 * If the HBA is not ready, the function will reset the HBA PCI
4220 * function again. The function returns 1 when the HBA fails to become
4221 * ready; otherwise it returns zero.
4224 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4229 /* Read the HBA Host Status Register */
4230 status = lpfc_sli4_post_status_check(phba);
4233 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4234 lpfc_sli_brdrestart(phba);
4235 status = lpfc_sli4_post_status_check(phba);
4238 /* Check to see if any errors occurred during init */
4240 phba->link_state = LPFC_HBA_ERROR;
4243 phba->sli4_hba.intr_enable = 0;
4249 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
4250 * @phba: Pointer to HBA context object.
4251 * @mask: Bit mask to be checked.
4253 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4254 * obtained via the API jump table function pointer in the lpfc_hba struct.
4257 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4259 return phba->lpfc_sli_brdready(phba, mask);
4262 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4265 * lpfc_reset_barrier - Make HBA ready for HBA reset
4266 * @phba: Pointer to HBA context object.
4268 * This function is called before resetting an HBA. This function is called
4269 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4271 void lpfc_reset_barrier(struct lpfc_hba *phba)
4273 uint32_t __iomem *resp_buf;
4274 uint32_t __iomem *mbox_buf;
4275 volatile uint32_t mbox;
4276 uint32_t hc_copy, ha_copy, resp_data;
4280 lockdep_assert_held(&phba->hbalock);
4282 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4283 if (hdrtype != 0x80 ||
4284 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4285 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4289 * Tell the other part of the chip to temporarily suspend all its DMA activity.
4292 resp_buf = phba->MBslimaddr;
4294 /* Disable the error attention */
4295 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4297 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4298 readl(phba->HCregaddr); /* flush */
4299 phba->link_flag |= LS_IGNORE_ERATT;
4301 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4303 if (ha_copy & HA_ERATT) {
4304 /* Clear Chip error bit */
4305 writel(HA_ERATT, phba->HAregaddr);
4306 phba->pport->stopped = 1;
4310 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4311 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4313 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4314 mbox_buf = phba->MBslimaddr;
4315 writel(mbox, mbox_buf);
4317 for (i = 0; i < 50; i++) {
4318 if (lpfc_readl((resp_buf + 1), &resp_data))
4320 if (resp_data != ~(BARRIER_TEST_PATTERN))
4326 if (lpfc_readl((resp_buf + 1), &resp_data))
4328 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4329 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4330 phba->pport->stopped)
4336 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4338 for (i = 0; i < 500; i++) {
4339 if (lpfc_readl(resp_buf, &resp_data))
4341 if (resp_data != mbox)
4350 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4352 if (!(ha_copy & HA_ERATT))
4358 if (readl(phba->HAregaddr) & HA_ERATT) {
4359 writel(HA_ERATT, phba->HAregaddr);
4360 phba->pport->stopped = 1;
4364 phba->link_flag &= ~LS_IGNORE_ERATT;
4365 writel(hc_copy, phba->HCregaddr);
4366 readl(phba->HCregaddr); /* flush */
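/*
 * Usage sketch: because lpfc_reset_barrier() asserts
 * lockdep_assert_held(&phba->hbalock), a caller takes hbalock around
 * the barrier and the reset itself, as the SLI-3 restart path below
 * does:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	lpfc_reset_barrier(phba);
 *	lpfc_sli_brdreset(phba);
 *	spin_unlock_irq(&phba->hbalock);
 */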
4370 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4371 * @phba: Pointer to HBA context object.
4373 * This function issues a kill_board mailbox command and waits for
4374 * the error attention interrupt. This function is called for stopping
4375 * the firmware processing. The caller is not required to hold any
4376 * locks. This function calls lpfc_hba_down_post function to free
4377 * any pending commands after the kill. The function returns 1 if it
4378 * fails to kill the board; otherwise it returns 0.
4381 lpfc_sli_brdkill(struct lpfc_hba *phba)
4383 struct lpfc_sli *psli;
4393 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4394 "0329 Kill HBA Data: x%x x%x\n",
4395 phba->pport->port_state, psli->sli_flag);
4397 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4401 /* Disable the error attention */
4402 spin_lock_irq(&phba->hbalock);
4403 if (lpfc_readl(phba->HCregaddr, &status)) {
4404 spin_unlock_irq(&phba->hbalock);
4405 mempool_free(pmb, phba->mbox_mem_pool);
4408 status &= ~HC_ERINT_ENA;
4409 writel(status, phba->HCregaddr);
4410 readl(phba->HCregaddr); /* flush */
4411 phba->link_flag |= LS_IGNORE_ERATT;
4412 spin_unlock_irq(&phba->hbalock);
4414 lpfc_kill_board(phba, pmb);
4415 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4416 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4418 if (retval != MBX_SUCCESS) {
4419 if (retval != MBX_BUSY)
4420 mempool_free(pmb, phba->mbox_mem_pool);
4421 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4422 "2752 KILL_BOARD command failed retval %d\n",
4424 spin_lock_irq(&phba->hbalock);
4425 phba->link_flag &= ~LS_IGNORE_ERATT;
4426 spin_unlock_irq(&phba->hbalock);
4430 spin_lock_irq(&phba->hbalock);
4431 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4432 spin_unlock_irq(&phba->hbalock);
4434 mempool_free(pmb, phba->mbox_mem_pool);
4436 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4437 * attention every 100ms for 3 seconds. If we don't get ERATT after
4438 * 3 seconds we still set HBA_ERROR state because the status of the
4439 * board is now undefined.
4441 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4443 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4445 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4449 del_timer_sync(&psli->mbox_tmo);
4450 if (ha_copy & HA_ERATT) {
4451 writel(HA_ERATT, phba->HAregaddr);
4452 phba->pport->stopped = 1;
4454 spin_lock_irq(&phba->hbalock);
4455 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4456 psli->mbox_active = NULL;
4457 phba->link_flag &= ~LS_IGNORE_ERATT;
4458 spin_unlock_irq(&phba->hbalock);
4460 lpfc_hba_down_post(phba);
4461 phba->link_state = LPFC_HBA_ERROR;
4463 return ha_copy & HA_ERATT ? 0 : 1;
4467 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4468 * @phba: Pointer to HBA context object.
4470 * This function resets the HBA by writing HC_INITFF to the control
4471 * register. After the HBA resets, this function resets all the iocb ring
4472 * indices. This function disables PCI layer parity checking during the reset.
4474 * This function returns 0 always.
4475 * The caller is not required to hold any locks.
4478 lpfc_sli_brdreset(struct lpfc_hba *phba)
4480 struct lpfc_sli *psli;
4481 struct lpfc_sli_ring *pring;
4488 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4489 "0325 Reset HBA Data: x%x x%x\n",
4490 (phba->pport) ? phba->pport->port_state : 0,
4493 /* perform board reset */
4494 phba->fc_eventTag = 0;
4495 phba->link_events = 0;
4497 phba->pport->fc_myDID = 0;
4498 phba->pport->fc_prevDID = 0;
4501 /* Turn off parity checking and serr during the physical reset */
4502 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4503 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4505 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4507 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4509 /* Now toggle INITFF bit in the Host Control Register */
4510 writel(HC_INITFF, phba->HCregaddr);
4512 readl(phba->HCregaddr); /* flush */
4513 writel(0, phba->HCregaddr);
4514 readl(phba->HCregaddr); /* flush */
4516 /* Restore PCI cmd register */
4517 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4519 /* Initialize relevant SLI info */
4520 for (i = 0; i < psli->num_rings; i++) {
4521 pring = &psli->sli3_ring[i];
4523 pring->sli.sli3.rspidx = 0;
4524 pring->sli.sli3.next_cmdidx = 0;
4525 pring->sli.sli3.local_getidx = 0;
4526 pring->sli.sli3.cmdidx = 0;
4527 pring->missbufcnt = 0;
4530 phba->link_state = LPFC_WARM_START;
4535 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4536 * @phba: Pointer to HBA context object.
4538 * This function resets a SLI4 HBA. This function disables PCI layer parity
4539 * checking while it resets the device. The caller is not required to hold any locks.
4542 * This function returns 0 always.
4545 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4547 struct lpfc_sli *psli = &phba->sli;
4552 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4553 "0295 Reset HBA Data: x%x x%x x%x\n",
4554 phba->pport->port_state, psli->sli_flag,
4557 /* perform board reset */
4558 phba->fc_eventTag = 0;
4559 phba->link_events = 0;
4560 phba->pport->fc_myDID = 0;
4561 phba->pport->fc_prevDID = 0;
4563 spin_lock_irq(&phba->hbalock);
4564 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4565 phba->fcf.fcf_flag = 0;
4566 spin_unlock_irq(&phba->hbalock);
4568 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4569 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4570 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4574 /* Now physically reset the device */
4575 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4576 "0389 Performing PCI function reset!\n");
4578 /* Turn off parity checking and serr during the physical reset */
4579 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4580 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4581 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4583 /* Perform FCoE PCI function reset before freeing queue memory */
4584 rc = lpfc_pci_function_reset(phba);
4586 /* Restore PCI cmd register */
4587 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4593 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4594 * @phba: Pointer to HBA context object.
4596 * This function is called in the SLI initialization code path to
4597 * restart the HBA. The caller is not required to hold any lock.
4598 * This function writes MBX_RESTART mailbox command to the SLIM and
4599 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4600 * function to free any pending commands. The function enables
4601 * POST only during the first initialization. The function returns zero.
4602 * The function does not guarantee completion of MBX_RESTART mailbox
4603 * command before the return of this function.
4606 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4609 struct lpfc_sli *psli;
4610 volatile uint32_t word0;
4611 void __iomem *to_slim;
4612 uint32_t hba_aer_enabled;
4614 spin_lock_irq(&phba->hbalock);
4616 /* Take PCIe device Advanced Error Reporting (AER) state */
4617 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4622 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4623 "0337 Restart HBA Data: x%x x%x\n",
4624 (phba->pport) ? phba->pport->port_state : 0,
4628 mb = (MAILBOX_t *) &word0;
4629 mb->mbxCommand = MBX_RESTART;
4632 lpfc_reset_barrier(phba);
4634 to_slim = phba->MBslimaddr;
4635 writel(*(uint32_t *) mb, to_slim);
4636 readl(to_slim); /* flush */
4638 /* Only skip post after fc_ffinit is completed */
4639 if (phba->pport && phba->pport->port_state)
4640 word0 = 1; /* This is really setting up word1 */
4642 word0 = 0; /* This is really setting up word1 */
4643 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4644 writel(*(uint32_t *) mb, to_slim);
4645 readl(to_slim); /* flush */
4647 lpfc_sli_brdreset(phba);
4649 phba->pport->stopped = 0;
4650 phba->link_state = LPFC_INIT_START;
4652 spin_unlock_irq(&phba->hbalock);
4654 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4655 psli->stats_start = ktime_get_seconds();
4657 /* Give the INITFF and Post time to settle. */
4660 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4661 if (hba_aer_enabled)
4662 pci_disable_pcie_error_reporting(phba->pcidev);
4664 lpfc_hba_down_post(phba);
4670 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4671 * @phba: Pointer to HBA context object.
4673 * This function is called in the SLI initialization code path to restart
4674 * a SLI4 HBA. The caller is not required to hold any lock.
4675 * At the end of the function, it calls lpfc_hba_down_post function to
4676 * free any pending commands.
4679 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4681 struct lpfc_sli *psli = &phba->sli;
4682 uint32_t hba_aer_enabled;
4686 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4687 "0296 Restart HBA Data: x%x x%x\n",
4688 phba->pport->port_state, psli->sli_flag);
4690 /* Take PCIe device Advanced Error Reporting (AER) state */
4691 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4693 rc = lpfc_sli4_brdreset(phba);
4697 spin_lock_irq(&phba->hbalock);
4698 phba->pport->stopped = 0;
4699 phba->link_state = LPFC_INIT_START;
4701 spin_unlock_irq(&phba->hbalock);
4703 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4704 psli->stats_start = ktime_get_seconds();
4706 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4707 if (hba_aer_enabled)
4708 pci_disable_pcie_error_reporting(phba->pcidev);
4710 lpfc_hba_down_post(phba);
4711 lpfc_sli4_queue_destroy(phba);
4717 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4718 * @phba: Pointer to HBA context object.
4720 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4721 * API jump table function pointer from the lpfc_hba struct.
4724 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4726 return phba->lpfc_sli_brdrestart(phba);
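/*
 * Illustrative sketch, an assumption about where the binding lives
 * (an lpfc_sli_api_table_setup()-style routine): the wrapper above
 * works because the per-revision entry point is bound once, based on
 * the device group / SLI revision:
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:		// SLI-2/SLI-3 HBA
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:		// SLI-4 HBA
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
 *		break;
 *	}
 */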
4730 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4731 * @phba: Pointer to HBA context object.
4733 * This function is called after an HBA restart to wait for successful
4734 * restart of the HBA. Successful restart of the HBA is indicated by
4735 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4736 * iterations, the function will restart the HBA again. The function returns
4737 * zero if the HBA restarts successfully; otherwise it returns a negative error code.
4740 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4742 uint32_t status, i = 0;
4744 /* Read the HBA Host Status Register */
4745 if (lpfc_readl(phba->HSregaddr, &status))
4748 /* Check status register to see what current state is */
4750 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4752 /* Check every 10ms for 10 retries, then every 100ms for 90
4753 * retries, then every 1 sec for 50 retries, for a total of
4754 * ~60 seconds before resetting the board again, and then
4755 * check every 1 sec for 50 more retries. The up-to-60-second
4756 * wait for board ready is required for Falcon FIPS zeroization
4757 * to complete; any board reset in between restarts zeroization
4758 * and further delays board ready.
4761 /* Adapter failed to init, timeout, status reg <status> */
4763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4764 "0436 Adapter failed to init, "
4765 "timeout, status reg x%x, "
4766 "FW Data: A8 x%x AC x%x\n", status,
4767 readl(phba->MBslimaddr + 0xa8),
4768 readl(phba->MBslimaddr + 0xac));
4769 phba->link_state = LPFC_HBA_ERROR;
4773 /* Check to see if any errors occurred during init */
4774 if (status & HS_FFERM) {
4775 /* ERROR: During chipset initialization */
4776 /* Adapter failed to init, chipset, status reg <status> */
4778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4779 "0437 Adapter failed to init, "
4780 "chipset, status reg x%x, "
4781 "FW Data: A8 x%x AC x%x\n", status,
4782 readl(phba->MBslimaddr + 0xa8),
4783 readl(phba->MBslimaddr + 0xac));
4784 phba->link_state = LPFC_HBA_ERROR;
4797 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4798 lpfc_sli_brdrestart(phba);
4800 /* Read the HBA Host Status Register */
4801 if (lpfc_readl(phba->HSregaddr, &status))
4805 /* Check to see if any errors occurred during init */
4806 if (status & HS_FFERM) {
4807 /* ERROR: During chipset initialization */
4808 /* Adapter failed to init, chipset, status reg <status> */
4809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4810 "0438 Adapter failed to init, chipset, "
4812 "FW Data: A8 x%x AC x%x\n", status,
4813 readl(phba->MBslimaddr + 0xa8),
4814 readl(phba->MBslimaddr + 0xac));
4815 phba->link_state = LPFC_HBA_ERROR;
4819 /* Clear all interrupt enable conditions */
4820 writel(0, phba->HCregaddr);
4821 readl(phba->HCregaddr); /* flush */
4823 /* setup host attn register */
4824 writel(0xffffffff, phba->HAregaddr);
4825 readl(phba->HAregaddr); /* flush */
4830 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4832 * This function calculates and returns the number of HBQs required to be configured.
4836 lpfc_sli_hbq_count(void)
4838 return ARRAY_SIZE(lpfc_hbq_defs);
4842 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4844 * This function adds the number of hbq entries in every HBQ to get
4845 * the total number of hbq entries required for the HBA and returns the result.
4849 lpfc_sli_hbq_entry_count(void)
4851 int hbq_count = lpfc_sli_hbq_count();
4855 for (i = 0; i < hbq_count; ++i)
4856 count += lpfc_hbq_defs[i]->entry_count;
4861 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4863 * This function calculates the amount of memory required for all hbq entries
4864 * to be configured and returns the total memory required.
4867 lpfc_sli_hbq_size(void)
4869 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
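/*
 * Usage sketch, hedged: the size computed here is what an init path
 * would hand to the DMA allocator when carving out one contiguous
 * buffer covering every HBQ entry. The hbqslimp member name is an
 * assumption borrowed from the driver's init code:
 *
 *	phba->hbqslimp.virt =
 *		dma_alloc_coherent(&phba->pcidev->dev, lpfc_sli_hbq_size(),
 *				   &phba->hbqslimp.phys, GFP_KERNEL);
 *	if (!phba->hbqslimp.virt)
 *		return -ENOMEM;
 */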
4873 * lpfc_sli_hbq_setup - configure and initialize HBQs
4874 * @phba: Pointer to HBA context object.
4876 * This function is called during the SLI initialization to configure
4877 * all the HBQs and post buffers to the HBQ. The caller is not
4878 * required to hold any locks. This function will return zero if successful
4879 * else it will return negative error code.
4882 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4884 int hbq_count = lpfc_sli_hbq_count();
4888 uint32_t hbq_entry_index;
4890 /* Get a Mailbox buffer to setup mailbox
4891 * commands for HBA initialization
4893 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4900 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4901 phba->link_state = LPFC_INIT_MBX_CMDS;
4902 phba->hbq_in_use = 1;
4904 hbq_entry_index = 0;
4905 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4906 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4907 phba->hbqs[hbqno].hbqPutIdx = 0;
4908 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4909 phba->hbqs[hbqno].entry_count =
4910 lpfc_hbq_defs[hbqno]->entry_count;
4911 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4912 hbq_entry_index, pmb);
4913 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4915 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4916 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4917 mbxStatus <status>, ring <num> */
4919 lpfc_printf_log(phba, KERN_ERR,
4920 LOG_SLI | LOG_VPORT,
4921 "1805 Adapter failed to init. "
4922 "Data: x%x x%x x%x\n",
4924 pmbox->mbxStatus, hbqno);
4926 phba->link_state = LPFC_HBA_ERROR;
4927 mempool_free(pmb, phba->mbox_mem_pool);
4931 phba->hbq_count = hbq_count;
4933 mempool_free(pmb, phba->mbox_mem_pool);
4935 /* Initially populate or replenish the HBQs */
4936 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4937 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4942 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4943 * @phba: Pointer to HBA context object.
4945 * This function is called during the SLI initialization to configure
4946 * all the HBQs and post buffers to the HBQ. The caller is not
4947 * required to hold any locks. This function will return zero if successful
4948 * else it will return negative error code.
4951 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4953 phba->hbq_in_use = 1;
4954 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4955 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4956 phba->hbq_count = 1;
4957 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4958 /* Initially populate or replenish the HBQs */
4963 * lpfc_sli_config_port - Issue config port mailbox command
4964 * @phba: Pointer to HBA context object.
4965 * @sli_mode: sli mode - 2/3
4967 * This function is called by the sli initialization code path
4968 * to issue config_port mailbox command. This function restarts the
4969 * HBA firmware and issues a config_port mailbox command to configure
4970 * the SLI interface in the sli mode specified by sli_mode
4971 * variable. The caller is not required to hold any locks.
4972 * The function returns 0 if successful, else returns negative error
4976 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4979 uint32_t resetcount = 0, rc = 0, done = 0;
4981 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4983 phba->link_state = LPFC_HBA_ERROR;
4987 phba->sli_rev = sli_mode;
4988 while (resetcount < 2 && !done) {
4989 spin_lock_irq(&phba->hbalock);
4990 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4991 spin_unlock_irq(&phba->hbalock);
4992 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4993 lpfc_sli_brdrestart(phba);
4994 rc = lpfc_sli_chipset_init(phba);
4998 spin_lock_irq(&phba->hbalock);
4999 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5000 spin_unlock_irq(&phba->hbalock);
5003 /* Call pre CONFIG_PORT mailbox command initialization. A
5004 * value of 0 means the call was successful. Any other
5005 * nonzero value is a failure, but if ERESTART is returned,
5006 * the driver may reset the HBA and try again.
5008 rc = lpfc_config_port_prep(phba);
5009 if (rc == -ERESTART) {
5010 phba->link_state = LPFC_LINK_UNKNOWN;
5015 phba->link_state = LPFC_INIT_MBX_CMDS;
5016 lpfc_config_port(phba, pmb);
5017 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5018 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5019 LPFC_SLI3_HBQ_ENABLED |
5020 LPFC_SLI3_CRP_ENABLED |
5021 LPFC_SLI3_DSS_ENABLED);
5022 if (rc != MBX_SUCCESS) {
5023 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5024 "0442 Adapter failed to init, mbxCmd x%x "
5025 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5026 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5027 spin_lock_irq(&phba->hbalock);
5028 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5029 spin_unlock_irq(&phba->hbalock);
5032 /* Allow asynchronous mailbox command to go through */
5033 spin_lock_irq(&phba->hbalock);
5034 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5035 spin_unlock_irq(&phba->hbalock);
5038 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5039 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5040 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5041 "3110 Port did not grant ASABT\n");
5046 goto do_prep_failed;
5048 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5049 if (!pmb->u.mb.un.varCfgPort.cMA) {
5051 goto do_prep_failed;
5053 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5054 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5055 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5056 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5057 phba->max_vpi : phba->max_vports;
5061 phba->fips_level = 0;
5062 phba->fips_spec_rev = 0;
5063 if (pmb->u.mb.un.varCfgPort.gdss) {
5064 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5065 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5066 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5067 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5068 "2850 Security Crypto Active. FIPS x%d "
5070 phba->fips_level, phba->fips_spec_rev);
5072 if (pmb->u.mb.un.varCfgPort.sec_err) {
5073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5074 "2856 Config Port Security Crypto "
5076 pmb->u.mb.un.varCfgPort.sec_err);
5078 if (pmb->u.mb.un.varCfgPort.gerbm)
5079 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5080 if (pmb->u.mb.un.varCfgPort.gcrp)
5081 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5083 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5084 phba->port_gp = phba->mbox->us.s3_pgp.port;
5086 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5087 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5088 phba->cfg_enable_bg = 0;
5089 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5091 "0443 Adapter did not grant "
5096 phba->hbq_get = NULL;
5097 phba->port_gp = phba->mbox->us.s2.port;
5101 mempool_free(pmb, phba->mbox_mem_pool);
5107 * lpfc_sli_hba_setup - SLI initialization function
5108 * @phba: Pointer to HBA context object.
5110 * This function is the main SLI initialization function. This function
5111 * is called by the HBA initialization code, HBA reset code and HBA
5112 * error attention handler code. Caller is not required to hold any
5113 * locks. This function issues config_port mailbox command to configure
5114 * the SLI, setup iocb rings and HBQ rings. In the end the function
5115 * calls the config_port_post function to issue init_link mailbox
5116 * command and to start the discovery. The function will return zero
5117 * if successful, else it will return negative error code.
5120 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5126 switch (phba->cfg_sli_mode) {
5128 if (phba->cfg_enable_npiv) {
5129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5130 "1824 NPIV enabled: Override sli_mode "
5131 "parameter (%d) to auto (0).\n",
5132 phba->cfg_sli_mode);
5141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5142 "1819 Unrecognized sli_mode parameter: %d.\n",
5143 phba->cfg_sli_mode);
5147 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5149 rc = lpfc_sli_config_port(phba, mode);
5151 if (rc && phba->cfg_sli_mode == 3)
5152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5153 "1820 Unable to select SLI-3. "
5154 "Not supported by adapter.\n");
5155 if (rc && mode != 2)
5156 rc = lpfc_sli_config_port(phba, 2);
5157 else if (rc && mode == 2)
5158 rc = lpfc_sli_config_port(phba, 3);
5160 goto lpfc_sli_hba_setup_error;
5162 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5163 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5164 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5167 "2709 This device supports "
5168 "Advanced Error Reporting (AER)\n");
5169 spin_lock_irq(&phba->hbalock);
5170 phba->hba_flag |= HBA_AER_ENABLED;
5171 spin_unlock_irq(&phba->hbalock);
5173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5174 "2708 This device does not support "
5175 "Advanced Error Reporting (AER): %d\n",
5177 phba->cfg_aer_support = 0;
5181 if (phba->sli_rev == 3) {
5182 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5183 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5185 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5186 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5187 phba->sli3_options = 0;
5190 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5191 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5192 phba->sli_rev, phba->max_vpi);
5193 rc = lpfc_sli_ring_map(phba);
5196 goto lpfc_sli_hba_setup_error;
5198 /* Initialize VPIs. */
5199 if (phba->sli_rev == LPFC_SLI_REV3) {
5201 * The VPI bitmask and physical ID array are allocated
5202 * and initialized once only - at driver load. A port
5203 * reset doesn't need to reinitialize this memory.
5205 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5206 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5207 phba->vpi_bmask = kcalloc(longs,
5208 sizeof(unsigned long),
5210 if (!phba->vpi_bmask) {
5212 goto lpfc_sli_hba_setup_error;
5215 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5218 if (!phba->vpi_ids) {
5219 kfree(phba->vpi_bmask);
5221 goto lpfc_sli_hba_setup_error;
5223 for (i = 0; i < phba->max_vpi; i++)
5224 phba->vpi_ids[i] = i;
5229 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5230 rc = lpfc_sli_hbq_setup(phba);
5232 goto lpfc_sli_hba_setup_error;
5234 spin_lock_irq(&phba->hbalock);
5235 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5236 spin_unlock_irq(&phba->hbalock);
5238 rc = lpfc_config_port_post(phba);
5240 goto lpfc_sli_hba_setup_error;
5244 lpfc_sli_hba_setup_error:
5245 phba->link_state = LPFC_HBA_ERROR;
5246 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5247 "0445 Firmware initialization failed\n");
5252 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5253 * @phba: Pointer to HBA context object.
5255 * This function issues a dump mailbox command to read config region
5256 * 23, parses the records in the region, and populates driver internal data structures.
5260 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5262 LPFC_MBOXQ_t *mboxq;
5263 struct lpfc_dmabuf *mp;
5264 struct lpfc_mqe *mqe;
5265 uint32_t data_length;
5268 /* Program the default value of vlan_id and fc_map */
5269 phba->valid_vlan = 0;
5270 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5271 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5272 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5274 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5278 mqe = &mboxq->u.mqe;
5279 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5281 goto out_free_mboxq;
5284 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5285 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5287 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5288 "(%d):2571 Mailbox cmd x%x Status x%x "
5289 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5290 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5291 "CQ: x%x x%x x%x x%x\n",
5292 mboxq->vport ? mboxq->vport->vpi : 0,
5293 bf_get(lpfc_mqe_command, mqe),
5294 bf_get(lpfc_mqe_status, mqe),
5295 mqe->un.mb_words[0], mqe->un.mb_words[1],
5296 mqe->un.mb_words[2], mqe->un.mb_words[3],
5297 mqe->un.mb_words[4], mqe->un.mb_words[5],
5298 mqe->un.mb_words[6], mqe->un.mb_words[7],
5299 mqe->un.mb_words[8], mqe->un.mb_words[9],
5300 mqe->un.mb_words[10], mqe->un.mb_words[11],
5301 mqe->un.mb_words[12], mqe->un.mb_words[13],
5302 mqe->un.mb_words[14], mqe->un.mb_words[15],
5303 mqe->un.mb_words[16], mqe->un.mb_words[50],
5305 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5306 mboxq->mcqe.trailer);
5309 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5312 goto out_free_mboxq;
5314 data_length = mqe->un.mb_words[5];
5315 if (data_length > DMP_RGN23_SIZE) {
5316 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5319 goto out_free_mboxq;
5322 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5323 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5328 mempool_free(mboxq, phba->mbox_mem_pool);
5333 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5334 * @phba: pointer to lpfc hba data structure.
5335 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5336 * @vpd: pointer to the memory to hold resulting port vpd data.
5337 * @vpd_size: On input, the number of bytes allocated to @vpd.
5338 * On output, the number of data bytes in @vpd.
5340 * This routine executes a READ_REV SLI4 mailbox command. In
5341 * addition, this routine gets the port vpd data.
5345 * -ENOMEM - could not allocate memory.
5348 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5349 uint8_t *vpd, uint32_t *vpd_size)
5353 struct lpfc_dmabuf *dmabuf;
5354 struct lpfc_mqe *mqe;
5356 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5361 * Get a DMA buffer for the vpd data resulting from the READ_REV mailbox command.
5364 dma_size = *vpd_size;
5365 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5366 &dmabuf->phys, GFP_KERNEL);
5367 if (!dmabuf->virt) {
5373 * The SLI4 implementation of READ_REV conflicts at word1,
5374 * bits 31:16 and SLI4 adds vpd functionality not present
5375 * in SLI3. This code corrects the conflicts.
5377 lpfc_read_rev(phba, mboxq);
5378 mqe = &mboxq->u.mqe;
5379 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5380 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5381 mqe->un.read_rev.word1 &= 0x0000FFFF;
5382 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5383 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5385 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5387 dma_free_coherent(&phba->pcidev->dev, dma_size,
5388 dmabuf->virt, dmabuf->phys);
5394 * The available vpd length cannot be bigger than the
5395 * DMA buffer passed to the port. Catch the less than
5396 * case and update the caller's size.
5398 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5399 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5401 memcpy(vpd, dmabuf->virt, *vpd_size);
5403 dma_free_coherent(&phba->pcidev->dev, dma_size,
5404 dmabuf->virt, dmabuf->phys);
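/*
 * Usage sketch, hedged; the initial buffer size is an assumption: a
 * caller allocates a vpd buffer, passes its size in, and reads the
 * (possibly reduced) byte count back out of *vpd_size:
 *
 *	uint32_t vpd_size = SLI4_PAGE_SIZE;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (!vpd)
 *		return -ENOMEM;
 *	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
 *	// on success, vpd_size holds the bytes actually copied
 */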
5410 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5411 * @phba: pointer to lpfc hba data structure.
5413 * This routine retrieves the SLI4 device physical port name for this PCI function.
5418 * otherwise - failed to retrieve physical port name
5421 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5423 LPFC_MBOXQ_t *mboxq;
5424 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5425 struct lpfc_controller_attribute *cntl_attr;
5426 struct lpfc_mbx_get_port_name *get_port_name;
5427 void *virtaddr = NULL;
5428 uint32_t alloclen, reqlen;
5429 uint32_t shdr_status, shdr_add_status;
5430 union lpfc_sli4_cfg_shdr *shdr;
5431 char cport_name = 0;
5434 /* We assume nothing at this point */
5435 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5436 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5438 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5441 /* obtain link type and link number via READ_CONFIG */
5442 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5443 lpfc_sli4_read_config(phba);
5444 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5445 goto retrieve_ppname;
5447 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5448 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5449 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5450 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5451 LPFC_SLI4_MBX_NEMBED);
5452 if (alloclen < reqlen) {
5453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5454 "3084 Allocated DMA memory size (%d) is "
5455 "less than the requested DMA memory size "
5456 "(%d)\n", alloclen, reqlen);
5458 goto out_free_mboxq;
5460 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5461 virtaddr = mboxq->sge_array->addr[0];
5462 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5463 shdr = &mbx_cntl_attr->cfg_shdr;
5464 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5465 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5466 if (shdr_status || shdr_add_status || rc) {
5467 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5468 "3085 Mailbox x%x (x%x/x%x) failed, "
5469 "rc:x%x, status:x%x, add_status:x%x\n",
5470 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5471 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5472 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5473 rc, shdr_status, shdr_add_status);
5475 goto out_free_mboxq;
5477 cntl_attr = &mbx_cntl_attr->cntl_attr;
5478 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5479 phba->sli4_hba.lnk_info.lnk_tp =
5480 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5481 phba->sli4_hba.lnk_info.lnk_no =
5482 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5483 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5484 "3086 lnk_type:%d, lnk_numb:%d\n",
5485 phba->sli4_hba.lnk_info.lnk_tp,
5486 phba->sli4_hba.lnk_info.lnk_no);
5489 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5490 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5491 sizeof(struct lpfc_mbx_get_port_name) -
5492 sizeof(struct lpfc_sli4_cfg_mhdr),
5493 LPFC_SLI4_MBX_EMBED);
5494 get_port_name = &mboxq->u.mqe.un.get_port_name;
5495 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5496 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5497 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5498 phba->sli4_hba.lnk_info.lnk_tp);
5499 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5500 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5501 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5502 if (shdr_status || shdr_add_status || rc) {
5503 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5504 "3087 Mailbox x%x (x%x/x%x) failed: "
5505 "rc:x%x, status:x%x, add_status:x%x\n",
5506 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5507 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5508 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5509 rc, shdr_status, shdr_add_status);
5511 goto out_free_mboxq;
5513 switch (phba->sli4_hba.lnk_info.lnk_no) {
5514 case LPFC_LINK_NUMBER_0:
5515 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5516 &get_port_name->u.response);
5517 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5519 case LPFC_LINK_NUMBER_1:
5520 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5521 &get_port_name->u.response);
5522 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5524 case LPFC_LINK_NUMBER_2:
5525 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5526 &get_port_name->u.response);
5527 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5529 case LPFC_LINK_NUMBER_3:
5530 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5531 &get_port_name->u.response);
5532 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5538 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5539 phba->Port[0] = cport_name;
5540 phba->Port[1] = '\0';
5541 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5542 "3091 SLI get port name: %s\n", phba->Port);
5546 if (rc != MBX_TIMEOUT) {
5547 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5548 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5550 mempool_free(mboxq, phba->mbox_mem_pool);
5556 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5557 * @phba: pointer to lpfc hba data structure.
5559 * This routine is called to explicitly arm the SLI4 device's completion and event queues.
5563 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5566 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5568 sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
5569 sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
5570 if (sli4_hba->nvmels_cq)
5571 sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
5574 if (sli4_hba->fcp_cq)
5575 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5576 sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
5579 if (sli4_hba->nvme_cq)
5580 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5581 sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
5585 sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
5587 if (sli4_hba->hba_eq)
5588 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5589 sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
5592 if (phba->nvmet_support) {
5593 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5594 sli4_hba->sli4_cq_release(
5595 sli4_hba->nvmet_cqset[qidx],
5601 sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
5605 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5606 * @phba: Pointer to HBA context object.
5607 * @type: The resource extent type.
5608 * @extnt_count: buffer to hold port available extent count.
5609 * @extnt_size: buffer to hold element count per extent.
5611 * This function calls the port and retrieves the number of available
5612 * extents and their size for a particular extent type.
5614 * Returns: 0 if successful. Nonzero otherwise.
5617 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5618 uint16_t *extnt_count, uint16_t *extnt_size)
5623 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5626 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5630 /* Find out how many extents are available for this resource type */
5631 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5632 sizeof(struct lpfc_sli4_cfg_mhdr));
5633 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5634 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5635 length, LPFC_SLI4_MBX_EMBED);
5637 /* Send an extents count of 0 - the GET doesn't use it. */
5638 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5639 LPFC_SLI4_MBX_EMBED);
5645 if (!phba->sli4_hba.intr_enable)
5646 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5648 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5649 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5656 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5657 if (bf_get(lpfc_mbox_hdr_status,
5658 &rsrc_info->header.cfg_shdr.response)) {
5659 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5660 "2930 Failed to get resource extents "
5661 "Status 0x%x Add'l Status 0x%x\n",
5662 bf_get(lpfc_mbox_hdr_status,
5663 &rsrc_info->header.cfg_shdr.response),
5664 bf_get(lpfc_mbox_hdr_add_status,
5665 &rsrc_info->header.cfg_shdr.response));
5670 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5672 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5675 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5676 "3162 Retrieved extents type-%d from port: count:%d, "
5677 "size:%d\n", type, *extnt_count, *extnt_size);
5680 mempool_free(mbox, phba->mbox_mem_pool);
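/*
 * Usage sketch, hedged: callers query one extent type at a time and
 * derive the total id space as count * size, e.g. for XRIs:
 *
 *	uint16_t xri_cnt, xri_size;
 *	uint32_t total_xris;		// hypothetical local
 *
 *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &xri_cnt, &xri_size);
 *	if (!rc)
 *		total_xris = xri_cnt * xri_size;
 */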
5685 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5686 * @phba: Pointer to HBA context object.
5687 * @type: The extent type to check.
5689 * This function reads the current available extents from the port and checks
5690 * if the extent count or extent size has changed since the last access.
5691 * Callers use this routine post port reset to understand if there is an
5692 * extent reprovisioning requirement.
5695 * -Error: error indicates problem.
5696 * 1: Extent count or size has changed.
5700 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5702 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5703 uint16_t size_diff, rsrc_ext_size;
5705 struct lpfc_rsrc_blks *rsrc_entry;
5706 struct list_head *rsrc_blk_list = NULL;
5710 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5717 case LPFC_RSC_TYPE_FCOE_RPI:
5718 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5720 case LPFC_RSC_TYPE_FCOE_VPI:
5721 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5723 case LPFC_RSC_TYPE_FCOE_XRI:
5724 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5726 case LPFC_RSC_TYPE_FCOE_VFI:
5727 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5733 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5735 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5739 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5746 * lpfc_sli4_cfg_post_extnts - Post a mailbox request to allocate extents
5747 * @phba: Pointer to HBA context object.
5748 * @extnt_cnt: number of available extents.
5749 * @type: the extent type (rpi, xri, vfi, vpi).
5750 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5751 * @mbox: pointer to the caller's allocated mailbox structure.
5753 * This function executes the extents allocation request. It also
5754 * takes care of the amount of memory needed to allocate or get the
5755 * allocated extents. It is the caller's responsibility to evaluate the response.
5759 * -Error: Error value describes the condition found.
5763 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5764 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5769 uint32_t alloc_len, mbox_tmo;
5771 /* Calculate the total requested length of the dma memory */
5772 req_len = extnt_cnt * sizeof(uint16_t);
5775 * Calculate the size of an embedded mailbox. The uint32_t
5776 * accounts for the extents-specific word.
5778 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - sizeof(uint32_t);
5782 * Presume the allocation and response will fit into an embedded
5783 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5785 *emb = LPFC_SLI4_MBX_EMBED;
5786 if (req_len > emb_len) {
5787 req_len = extnt_cnt * sizeof(uint16_t) +
5788 sizeof(union lpfc_sli4_cfg_shdr) +
5790 *emb = LPFC_SLI4_MBX_NEMBED;
5793 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5794 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5796 if (alloc_len < req_len) {
5797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5798 "2982 Allocated DMA memory size (x%x) is "
5799 "less than the requested DMA memory "
5800 "size (x%x)\n", alloc_len, req_len);
5803 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5807 if (!phba->sli4_hba.intr_enable)
5808 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5810 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5811 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5820 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5821 * @phba: Pointer to HBA context object.
5822 * @type: The resource extent type to allocate.
5824 * This function allocates the number of elements for the specified
5828 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5831 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5832 uint16_t rsrc_id, rsrc_start, j, k;
5835 unsigned long longs;
5836 unsigned long *bmask;
5837 struct lpfc_rsrc_blks *rsrc_blks;
5840 struct lpfc_id_range *id_array = NULL;
5841 void *virtaddr = NULL;
5842 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5843 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5844 struct list_head *ext_blk_list;
5846 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5852 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5853 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5854 "3009 No available Resource Extents "
5855 "for resource type 0x%x: Count: 0x%x, "
5856 "Size 0x%x\n", type, rsrc_cnt,
5861 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5862 "2903 Post resource extents type-0x%x: "
5863 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5865 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5869 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5876 * Figure out where the response is located. Then get local pointers
5877 * to the response data. The port does not guarantee to respond to
5878 * all extents count requests, so update the local variable with the
5879 * allocated count from the port.
5881 if (emb == LPFC_SLI4_MBX_EMBED) {
5882 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5883 id_array = &rsrc_ext->u.rsp.id[0];
5884 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5886 virtaddr = mbox->sge_array->addr[0];
5887 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5888 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5889 id_array = &n_rsrc->id;
5892 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5893 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5896 * Based on the resource size and count, correct the base and max resource values.
5899 length = sizeof(struct lpfc_rsrc_blks);
5901 case LPFC_RSC_TYPE_FCOE_RPI:
5902 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5903 sizeof(unsigned long),
5905 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5909 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5912 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5913 kfree(phba->sli4_hba.rpi_bmask);
5919 * The next_rpi was initialized with the maximum available
5920 * count but the port may allocate a smaller number. Catch
5921 * that case and update the next_rpi.
5923 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5925 /* Initialize local ptrs for common extent processing later. */
5926 bmask = phba->sli4_hba.rpi_bmask;
5927 ids = phba->sli4_hba.rpi_ids;
5928 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5930 case LPFC_RSC_TYPE_FCOE_VPI:
5931 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5933 if (unlikely(!phba->vpi_bmask)) {
5937 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5939 if (unlikely(!phba->vpi_ids)) {
5940 kfree(phba->vpi_bmask);
5945 /* Initialize local ptrs for common extent processing later. */
5946 bmask = phba->vpi_bmask;
5947 ids = phba->vpi_ids;
5948 ext_blk_list = &phba->lpfc_vpi_blk_list;
5950 case LPFC_RSC_TYPE_FCOE_XRI:
5951 phba->sli4_hba.xri_bmask = kcalloc(longs,
5952 sizeof(unsigned long),
5954 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5958 phba->sli4_hba.max_cfg_param.xri_used = 0;
5959 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5962 if (unlikely(!phba->sli4_hba.xri_ids)) {
5963 kfree(phba->sli4_hba.xri_bmask);
5968 /* Initialize local ptrs for common extent processing later. */
5969 bmask = phba->sli4_hba.xri_bmask;
5970 ids = phba->sli4_hba.xri_ids;
5971 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5973 case LPFC_RSC_TYPE_FCOE_VFI:
5974 phba->sli4_hba.vfi_bmask = kcalloc(longs,
5975 sizeof(unsigned long),
5977 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5981 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
5984 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5985 kfree(phba->sli4_hba.vfi_bmask);
5990 /* Initialize local ptrs for common extent processing later. */
5991 bmask = phba->sli4_hba.vfi_bmask;
5992 ids = phba->sli4_hba.vfi_ids;
5993 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5996 /* Unsupported Opcode. Fail call. */
6000 ext_blk_list = NULL;
6005 * Complete initializing the extent configuration with the
6006 * allocated ids assigned to this function. The bitmask serves
6007 * as an index into the array and manages the available ids. The
6008 * array just stores the ids communicated to the port via the wqes.
6010 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6012 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6015 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6018 rsrc_blks = kzalloc(length, GFP_KERNEL);
6019 if (unlikely(!rsrc_blks)) {
6025 rsrc_blks->rsrc_start = rsrc_id;
6026 rsrc_blks->rsrc_size = rsrc_size;
6027 list_add_tail(&rsrc_blks->list, ext_blk_list);
6028 rsrc_start = rsrc_id;
6029 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6030 phba->sli4_hba.common_xri_start = rsrc_start +
6031 lpfc_sli4_get_iocb_cnt(phba);
6034 while (rsrc_id < (rsrc_start + rsrc_size)) {
6039 /* Entire word processed. Get next word. */
6044 lpfc_sli4_mbox_cmd_free(phba, mbox);
6051 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6052 * @phba: Pointer to HBA context object.
6053 * @type: the extent's type.
6055 * This function deallocates all extents of a particular resource type.
6056 * SLI4 does not allow for deallocating a particular extent range. It
6057 * is the caller's responsibility to release all kernel memory resources.
6060 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6063 uint32_t length, mbox_tmo = 0;
6065 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6066 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6068 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6073 * This function sends an embedded mailbox because it only sends the
6074 * resource type. All extents of this type are released by the
6077 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6078 sizeof(struct lpfc_sli4_cfg_mhdr));
6079 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6080 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6081 length, LPFC_SLI4_MBX_EMBED);
6083 /* Send an extents count of 0 - the dealloc doesn't use it. */
6084 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6085 LPFC_SLI4_MBX_EMBED);
6090 if (!phba->sli4_hba.intr_enable)
6091 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6093 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6094 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6101 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6102 if (bf_get(lpfc_mbox_hdr_status,
6103 &dealloc_rsrc->header.cfg_shdr.response)) {
6104 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6105 "2919 Failed to release resource extents "
6106 "for type %d - Status 0x%x Add'l Status 0x%x. "
6107 "Resource memory not released.\n",
6109 bf_get(lpfc_mbox_hdr_status,
6110 &dealloc_rsrc->header.cfg_shdr.response),
6111 bf_get(lpfc_mbox_hdr_add_status,
6112 &dealloc_rsrc->header.cfg_shdr.response));
6117 /* Release kernel memory resources for the specific type. */
switch (type) {
6119 case LPFC_RSC_TYPE_FCOE_VPI:
6120 kfree(phba->vpi_bmask);
6121 kfree(phba->vpi_ids);
6122 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6123 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6124 &phba->lpfc_vpi_blk_list, list) {
6125 list_del_init(&rsrc_blk->list);
6128 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6130 case LPFC_RSC_TYPE_FCOE_XRI:
6131 kfree(phba->sli4_hba.xri_bmask);
6132 kfree(phba->sli4_hba.xri_ids);
6133 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6134 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6135 list_del_init(&rsrc_blk->list);
6139 case LPFC_RSC_TYPE_FCOE_VFI:
6140 kfree(phba->sli4_hba.vfi_bmask);
6141 kfree(phba->sli4_hba.vfi_ids);
6142 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6143 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6144 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6145 list_del_init(&rsrc_blk->list);
6149 case LPFC_RSC_TYPE_FCOE_RPI:
6150 /* RPI bitmask and physical id array are cleaned up earlier. */
6151 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6152 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6153 list_del_init(&rsrc_blk->list);
6161 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6164 mempool_free(mbox, phba->mbox_mem_pool);
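/**
 * lpfc_set_features - Build a SET_FEATURES mailbox command for one feature
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the mailbox object to initialize.
 * @feature: The feature to enable, e.g. LPFC_SET_UE_RECOVERY.
 *
 * Constructs an embedded SET_FEATURES mailbox command for the requested
 * feature. The caller is responsible for issuing the mailbox command.
 */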
6169 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
uint32_t feature)
{
uint32_t len;
6174 len = sizeof(struct lpfc_mbx_set_feature) -
6175 sizeof(struct lpfc_sli4_cfg_mhdr);
6176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6177 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6178 LPFC_SLI4_MBX_EMBED);
switch (feature) {
6181 case LPFC_SET_UE_RECOVERY:
6182 bf_set(lpfc_mbx_set_feature_UER,
6183 &mbox->u.mqe.un.set_feature, 1);
6184 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6185 mbox->u.mqe.un.set_feature.param_len = 8;
6187 case LPFC_SET_MDS_DIAGS:
6188 bf_set(lpfc_mbx_set_feature_mds,
6189 &mbox->u.mqe.un.set_feature, 1);
6190 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6191 &mbox->u.mqe.un.set_feature, 1);
6192 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6193 mbox->u.mqe.un.set_feature.param_len = 8;
6201 * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
6202 * @phba: Pointer to HBA context object.
6204 * Disable FW logging into host memory on the adapter. This must be
6205 * done before reading the logs from host memory.
6208 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6210 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6212 ras_fwlog->ras_active = false;
6214 /* Disable FW logging to host memory */
6215 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6216 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6220 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6221 * @phba: Pointer to HBA context object.
6223 * This function is called to free memory allocated for RAS FW logging
6224 * support in the driver.
6227 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6229 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6230 struct lpfc_dmabuf *dmabuf, *next;
6232 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6233 list_for_each_entry_safe(dmabuf, next,
6234 &ras_fwlog->fwlog_buff_list,
6236 list_del(&dmabuf->list);
6237 dma_free_coherent(&phba->pcidev->dev,
6238 LPFC_RAS_MAX_ENTRY_SIZE,
6239 dmabuf->virt, dmabuf->phys);
6244 if (ras_fwlog->lwpd.virt) {
6245 dma_free_coherent(&phba->pcidev->dev,
6246 sizeof(uint32_t) * 2,
6247 ras_fwlog->lwpd.virt,
6248 ras_fwlog->lwpd.phys);
6249 ras_fwlog->lwpd.virt = NULL;
6252 ras_fwlog->ras_active = false;
6256 * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support
6257 * @phba: Pointer to HBA context object.
6258 * @fwlog_buff_count: Count of buffers to be created.
6260 * This routine allocates DMA memory for the Log Write Position Data
6261 * (LWPD) and for the buffers posted to the adapter for FW log updates.
6262 * The buffer count is calculated from the module parameter
6263 * ras_fwlog_buffsize; each buffer posted to the FW is 64K.
6267 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6268 uint32_t fwlog_buff_count)
6270 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6271 struct lpfc_dmabuf *dmabuf;
6274 /* Initialize List */
6275 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6277 /* Allocate memory for the LWPD */
6278 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6279 sizeof(uint32_t) * 2,
6280 &ras_fwlog->lwpd.phys,
6282 if (!ras_fwlog->lwpd.virt) {
6283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6284 "6185 LWPD Memory Alloc Failed\n");
6289 ras_fwlog->fw_buffcount = fwlog_buff_count;
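/* Allocate the FW log DMA buffers and queue them on fwlog_buff_list. */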
6290 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6291 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6295 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6296 "6186 Memory Alloc failed FW logging");
6300 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
6301 LPFC_RAS_MAX_ENTRY_SIZE,
6304 if (!dmabuf->virt) {
6307 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6308 "6187 DMA Alloc Failed FW logging");
6311 dmabuf->buffer_tag = i;
6312 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6317 lpfc_sli4_ras_dma_free(phba);
6323 * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
6324 * @phba: pointer to lpfc hba data structure.
6325 * @pmb: pointer to the driver internal queue element for mailbox command.
6327 * Completion handler for driver's RAS MBX command to the device.
6330 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6332 MAILBOX_t *mb = &pmb->u.mb;
6333 union lpfc_sli4_cfg_shdr *shdr;
6334 uint32_t shdr_status, shdr_add_status;
6335 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6339 shdr = (union lpfc_sli4_cfg_shdr *)
6340 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6341 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6342 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6344 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6345 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6346 "6188 FW LOG mailbox "
6347 "completed with status x%x add_status x%x,"
6348 " mbx status x%x\n",
6349 shdr_status, shdr_add_status, mb->mbxStatus);
6351 ras_fwlog->ras_hwsupport = false;
6355 ras_fwlog->ras_active = true;
6356 mempool_free(pmb, phba->mbox_mem_pool);
6361 /* Free RAS DMA memory */
6362 lpfc_sli4_ras_dma_free(phba);
6363 mempool_free(pmb, phba->mbox_mem_pool);
6367 * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
6368 * @phba: pointer to lpfc hba data structure.
6369 * @fwlog_level: Logging verbosity level.
6370 * @fwlog_enable: Enable/Disable logging.
6372 * Initialize memory and post a mailbox command to enable FW logging in
* host memory.
6376 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6377 uint32_t fwlog_level,
6378 uint32_t fwlog_enable)
6380 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6381 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6382 struct lpfc_dmabuf *dmabuf;
6384 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
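/* Total log size comes from the ras_fwlog_buffsize module parameter;
 * split it into LPFC_RAS_MAX_ENTRY_SIZE buffers.
 */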
6387 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6388 phba->cfg_ras_fwlog_buffsize);
6389 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6392 * If re-enabling FW logging support, use the earlier allocated
6393 * DMA buffers while posting the MBX command.
6395 if (!ras_fwlog->lwpd.virt) {
6396 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6398 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6399 "6189 FW Log Memory Allocation Failed");
6404 /* Setup Mailbox command */
6405 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6407 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6408 "6190 RAS MBX Alloc Failed");
6413 ras_fwlog->fw_loglevel = fwlog_level;
6414 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6415 sizeof(struct lpfc_sli4_cfg_mhdr));
6417 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6418 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6419 len, LPFC_SLI4_MBX_EMBED);
6421 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6422 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6424 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6425 ras_fwlog->fw_loglevel);
6426 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6427 ras_fwlog->fw_buffcount);
6428 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6429 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6431 /* Update DMA buffer address */
6432 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6433 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6435 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6436 putPaddrLow(dmabuf->phys);
6438 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6439 putPaddrHigh(dmabuf->phys);
6442 /* Update LWPD address */
6443 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6444 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6446 mbox->vport = phba->pport;
6447 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6449 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6451 if (rc == MBX_NOT_FINISHED) {
6452 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6453 "6191 FW-Log Mailbox failed. "
6454 "status %d mbxStatus : x%x", rc,
6455 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6456 mempool_free(mbox, phba->mbox_mem_pool);
6463 lpfc_sli4_ras_dma_free(phba);
6469 * lpfc_sli4_ras_setup - Check if RAS is supported on the adapter
6470 * @phba: Pointer to HBA context object.
6472 * Check if RAS is supported on the adapter and initialize it.
6475 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6477 /* Check whether the RAS FW Log needs to be enabled */
6478 if (lpfc_check_fwlog_support(phba))
return;
6481 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6482 LPFC_RAS_ENABLE_LOGGING);
6486 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6487 * @phba: Pointer to HBA context object.
6489 * This function allocates all SLI4 resource identifiers.
6492 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6494 int i, rc, error = 0;
6495 uint16_t count, base;
6496 unsigned long longs;
6498 if (!phba->sli4_hba.rpi_hdrs_in_use)
6499 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6500 if (phba->sli4_hba.extents_in_use) {
6502 * The port supports resource extents. The XRI, VPI, VFI, RPI
6503 * resource extent count must be read and allocated before
6504 * provisioning the resource id arrays.
6506 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6507 LPFC_IDX_RSRC_RDY) {
6509 * Extent-based resources are set - the driver could
6510 * be in a port reset. Figure out if any corrective
6511 * actions need to be taken.
6513 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6514 LPFC_RSC_TYPE_FCOE_VFI);
6517 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6518 LPFC_RSC_TYPE_FCOE_VPI);
6521 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6522 LPFC_RSC_TYPE_FCOE_XRI);
6525 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6526 LPFC_RSC_TYPE_FCOE_RPI);
6531 * It's possible that the number of resources
6532 * provided to this port instance changed between
6533 * resets. Detect this condition and reallocate
6534 * resources. Otherwise, there is no action.
6537 lpfc_printf_log(phba, KERN_INFO,
6538 LOG_MBOX | LOG_INIT,
6539 "2931 Detected extent resource "
6540 "change. Reallocating all "
6542 rc = lpfc_sli4_dealloc_extent(phba,
6543 LPFC_RSC_TYPE_FCOE_VFI);
6544 rc = lpfc_sli4_dealloc_extent(phba,
6545 LPFC_RSC_TYPE_FCOE_VPI);
6546 rc = lpfc_sli4_dealloc_extent(phba,
6547 LPFC_RSC_TYPE_FCOE_XRI);
6548 rc = lpfc_sli4_dealloc_extent(phba,
6549 LPFC_RSC_TYPE_FCOE_RPI);
6554 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6558 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6562 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6566 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6569 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
LPFC_IDX_RSRC_RDY);
6574 * The port does not support resource extents. The XRI, VPI,
6575 * VFI, RPI resource ids were determined from READ_CONFIG.
6576 * Just allocate the bitmasks and provision the resource id
6577 * arrays. If a port reset is active, the resources don't
6578 * need any action - just exit.
6580 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6581 LPFC_IDX_RSRC_RDY) {
6582 lpfc_sli4_dealloc_resource_identifiers(phba);
6583 lpfc_sli4_remove_rpis(phba);
6586 count = phba->sli4_hba.max_cfg_param.max_rpi;
6588 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6589 "3279 Invalid provisioning of "
6594 base = phba->sli4_hba.max_cfg_param.rpi_base;
6595 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6596 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6597 sizeof(unsigned long),
6599 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6603 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6605 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6607 goto free_rpi_bmask;
6610 for (i = 0; i < count; i++)
6611 phba->sli4_hba.rpi_ids[i] = base + i;
6614 count = phba->sli4_hba.max_cfg_param.max_vpi;
6616 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6617 "3280 Invalid provisioning of "
6622 base = phba->sli4_hba.max_cfg_param.vpi_base;
6623 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6624 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6626 if (unlikely(!phba->vpi_bmask)) {
6630 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6632 if (unlikely(!phba->vpi_ids)) {
6634 goto free_vpi_bmask;
6637 for (i = 0; i < count; i++)
6638 phba->vpi_ids[i] = base + i;
6641 count = phba->sli4_hba.max_cfg_param.max_xri;
6643 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6644 "3281 Invalid provisioning of "
6649 base = phba->sli4_hba.max_cfg_param.xri_base;
6650 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6651 phba->sli4_hba.xri_bmask = kcalloc(longs,
6652 sizeof(unsigned long),
6654 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6658 phba->sli4_hba.max_cfg_param.xri_used = 0;
6659 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6661 if (unlikely(!phba->sli4_hba.xri_ids)) {
6663 goto free_xri_bmask;
6666 for (i = 0; i < count; i++)
6667 phba->sli4_hba.xri_ids[i] = base + i;
6670 count = phba->sli4_hba.max_cfg_param.max_vfi;
6672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6673 "3282 Invalid provisioning of "
6678 base = phba->sli4_hba.max_cfg_param.vfi_base;
6679 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6680 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6681 sizeof(unsigned long),
6683 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6687 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6689 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6691 goto free_vfi_bmask;
6694 for (i = 0; i < count; i++)
6695 phba->sli4_hba.vfi_ids[i] = base + i;
6698 * Mark all resources ready. An HBA reset doesn't need
6699 * to reset the initialization.
6701 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
LPFC_IDX_RSRC_RDY);
6707 kfree(phba->sli4_hba.vfi_bmask);
6708 phba->sli4_hba.vfi_bmask = NULL;
6710 kfree(phba->sli4_hba.xri_ids);
6711 phba->sli4_hba.xri_ids = NULL;
6713 kfree(phba->sli4_hba.xri_bmask);
6714 phba->sli4_hba.xri_bmask = NULL;
6716 kfree(phba->vpi_ids);
6717 phba->vpi_ids = NULL;
6719 kfree(phba->vpi_bmask);
6720 phba->vpi_bmask = NULL;
6722 kfree(phba->sli4_hba.rpi_ids);
6723 phba->sli4_hba.rpi_ids = NULL;
6725 kfree(phba->sli4_hba.rpi_bmask);
6726 phba->sli4_hba.rpi_bmask = NULL;
6732 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6733 * @phba: Pointer to HBA context object.
6735 * This function deallocates all previously allocated SLI4 resource
* identifiers.
6739 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6741 if (phba->sli4_hba.extents_in_use) {
6742 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6743 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6744 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6745 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
} else {
6747 kfree(phba->vpi_bmask);
6748 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6749 kfree(phba->vpi_ids);
6750 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6751 kfree(phba->sli4_hba.xri_bmask);
6752 kfree(phba->sli4_hba.xri_ids);
6753 kfree(phba->sli4_hba.vfi_bmask);
6754 kfree(phba->sli4_hba.vfi_ids);
6755 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6756 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6763 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6764 * @phba: Pointer to HBA context object.
6765 * @type: The resource extent type.
6766 * @extnt_cnt: buffer to hold port extent count response
6767 * @extnt_size: buffer to hold port extent size response.
6769 * This function calls the port to read the host allocated extents
6770 * for a particular type.
6773 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6774 uint16_t *extnt_cnt, uint16_t *extnt_size)
6778 uint16_t curr_blks = 0;
6779 uint32_t req_len, emb_len;
6780 uint32_t alloc_len, mbox_tmo;
6781 struct list_head *blk_list_head;
6782 struct lpfc_rsrc_blks *rsrc_blk;
6784 void *virtaddr = NULL;
6785 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6786 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6787 union lpfc_sli4_cfg_shdr *shdr;
switch (type) {
6790 case LPFC_RSC_TYPE_FCOE_VPI:
6791 blk_list_head = &phba->lpfc_vpi_blk_list;
6793 case LPFC_RSC_TYPE_FCOE_XRI:
6794 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6796 case LPFC_RSC_TYPE_FCOE_VFI:
6797 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6799 case LPFC_RSC_TYPE_FCOE_RPI:
6800 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6806 /* Count the number of extents currently allocated for this type. */
6807 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6808 if (curr_blks == 0) {
6810 * The GET_ALLOCATED mailbox does not return the size,
6811 * just the count. The size should be just the size
6812 * stored in the current allocated block and all sizes
6813 * for an extent type are the same, so set the return
* value now.
6816 *extnt_size = rsrc_blk->rsrc_size;
6822 * Calculate the size of an embedded mailbox. The uint32_t
6823 * accounts for the extents-specific word.
6825 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
sizeof(uint32_t);
6829 * Presume the allocation and response will fit into an embedded
6830 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6832 emb = LPFC_SLI4_MBX_EMBED;
req_len = curr_blks * sizeof(uint16_t);
6834 if (req_len > emb_len) {
6835 req_len = curr_blks * sizeof(uint16_t) +
6836 sizeof(union lpfc_sli4_cfg_shdr) +
sizeof(uint32_t);
6838 emb = LPFC_SLI4_MBX_NEMBED;
6841 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
6844 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6846 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6847 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
req_len, emb);
6849 if (alloc_len < req_len) {
6850 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6851 "2983 Allocated DMA memory size (x%x) is "
6852 "less than the requested DMA memory "
6853 "size (x%x)\n", alloc_len, req_len);
6857 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6863 if (!phba->sli4_hba.intr_enable)
6864 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6866 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6867 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6876 * Figure out where the response is located. Then get local pointers
6877 * to the response data. The port does not guarantee to respond to
6878 * all extent count requests, so update the local variable with the
6879 * allocated count from the port.
6881 if (emb == LPFC_SLI4_MBX_EMBED) {
6882 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6883 shdr = &rsrc_ext->header.cfg_shdr;
6884 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6886 virtaddr = mbox->sge_array->addr[0];
6887 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6888 shdr = &n_rsrc->cfg_shdr;
6889 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6892 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6893 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6894 "2984 Failed to read allocated resources "
6895 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6897 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6898 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6903 lpfc_sli4_mbox_cmd_free(phba, mbox);
6908 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
6909 * @phba: pointer to lpfc hba data structure.
6911 * @sgl_list: linked list of sgl buffers to post
6912 * @cnt: number of linked list buffers
6914 * This routine walks the list of buffers that have been allocated and
6915 * reposts them to the port by using SGL block post. This is needed after a
6916 * pci_function_reset/warm_start or start. It attempts to construct blocks
6917 * of buffer sgls which contain contiguous xris and uses the non-embedded
6918 * SGL block post mailbox commands to post them to the port. For a single
6919 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
6920 * post mailbox command.
6922 * Returns: the number of buffer sgls successfully reposted to the port.
6925 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6926 struct list_head *sgl_list, int cnt)
6928 struct lpfc_sglq *sglq_entry = NULL;
6929 struct lpfc_sglq *sglq_entry_next = NULL;
6930 struct lpfc_sglq *sglq_entry_first = NULL;
6931 int status, total_cnt;
6932 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6933 int last_xritag = NO_XRI;
6934 LIST_HEAD(prep_sgl_list);
6935 LIST_HEAD(blck_sgl_list);
6936 LIST_HEAD(allc_sgl_list);
6937 LIST_HEAD(post_sgl_list);
6938 LIST_HEAD(free_sgl_list);
6940 spin_lock_irq(&phba->hbalock);
6941 spin_lock(&phba->sli4_hba.sgl_list_lock);
6942 list_splice_init(sgl_list, &allc_sgl_list);
6943 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6944 spin_unlock_irq(&phba->hbalock);
total_cnt = cnt;
6947 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6948 &allc_sgl_list, list) {
6949 list_del_init(&sglq_entry->list);
6951 if ((last_xritag != NO_XRI) &&
6952 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6953 /* a hole in xri block, form a sgl posting block */
6954 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6955 post_cnt = block_cnt - 1;
6956 /* prepare list for next posting block */
6957 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6960 /* prepare list for next posting block */
6961 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6962 /* enough sgls for non-embed sgl mbox command */
6963 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6964 list_splice_init(&prep_sgl_list,
6966 post_cnt = block_cnt;
6972 /* keep track of last sgl's xritag */
6973 last_xritag = sglq_entry->sli4_xritag;
6975 /* end of repost sgl list condition for buffers */
6976 if (num_posted == total_cnt) {
6977 if (post_cnt == 0) {
6978 list_splice_init(&prep_sgl_list,
6980 post_cnt = block_cnt;
6981 } else if (block_cnt == 1) {
6982 status = lpfc_sli4_post_sgl(phba,
6983 sglq_entry->phys, 0,
6984 sglq_entry->sli4_xritag);
6986 /* successful, put sgl to posted list */
6987 list_add_tail(&sglq_entry->list,
6990 /* Failure, put sgl to free list */
6991 lpfc_printf_log(phba, KERN_WARNING,
6993 "3159 Failed to post "
6994 "sgl, xritag:x%x\n",
6995 sglq_entry->sli4_xritag);
6996 list_add_tail(&sglq_entry->list,
7003 /* continue until a nembed page worth of sgls */
7007 /* post the buffer list sgls as a block */
7008 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7012 /* success, put sgl list to posted sgl list */
7013 list_splice_init(&blck_sgl_list, &post_sgl_list);
7015 /* Failure, put sgl list to free sgl list */
7016 sglq_entry_first = list_first_entry(&blck_sgl_list,
7019 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7020 "3160 Failed to post sgl-list, "
7022 sglq_entry_first->sli4_xritag,
7023 (sglq_entry_first->sli4_xritag +
7025 list_splice_init(&blck_sgl_list, &free_sgl_list);
7026 total_cnt -= post_cnt;
7029 /* don't reset xritag due to hole in xri block */
7031 last_xritag = NO_XRI;
7033 /* reset sgl post count for next round of posting */
7037 /* free the sgls that failed to post */
7038 lpfc_free_sgl_list(phba, &free_sgl_list);
7040 /* push sgls posted to the available list */
7041 if (!list_empty(&post_sgl_list)) {
7042 spin_lock_irq(&phba->hbalock);
7043 spin_lock(&phba->sli4_hba.sgl_list_lock);
7044 list_splice_init(&post_sgl_list, sgl_list);
7045 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7046 spin_unlock_irq(&phba->hbalock);
7048 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7049 "3161 Failure to post sgl to port.\n");
7053 /* return the number of XRIs actually posted */
return total_cnt;
7058 * lpfc_sli4_repost_common_sgl_list - Repost all the allocated nvme buffer sgls
7059 * @phba: pointer to lpfc hba data structure.
7061 * This routine walks the list of nvme buffers that have been allocated and
7062 * reposts them to the port by using SGL block post. This is needed after a
7063 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7064 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7065 * to the lpfc_common_buf_list. If the repost fails, reject all nvme buffers.
7067 * Returns: 0 = success, non-zero failure.
7070 lpfc_sli4_repost_common_sgl_list(struct lpfc_hba *phba)
7072 LIST_HEAD(post_nblist);
7073 int num_posted, rc = 0;
7075 /* get all NVME buffers that need reposting onto a local list */
7076 spin_lock_irq(&phba->common_buf_list_get_lock);
7077 spin_lock(&phba->common_buf_list_put_lock);
7078 list_splice_init(&phba->lpfc_common_buf_list_get, &post_nblist);
7079 list_splice(&phba->lpfc_common_buf_list_put, &post_nblist);
7080 phba->get_common_bufs = 0;
7081 phba->put_common_bufs = 0;
7082 spin_unlock(&phba->common_buf_list_put_lock);
7083 spin_unlock_irq(&phba->common_buf_list_get_lock);
7085 /* post the list of nvme buffer sgls to port if available */
7086 if (!list_empty(&post_nblist)) {
7087 num_posted = lpfc_sli4_post_common_sgl_list(
7088 phba, &post_nblist, phba->sli4_hba.common_xri_cnt);
7089 /* failed to post any nvme buffer, return error */
7090 if (num_posted == 0)
rc = -EIO;
}
return rc;
}
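/**
 * lpfc_set_host_data - Pass the host's OS name and driver version to the port
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the mailbox object to initialize.
 *
 * Builds an embedded SET_HOST_DATA mailbox command carrying a
 * "Linux <FC|FCoE> v<driver version>" string. The caller issues the command.
 */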
7097 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
uint32_t len;
7101 len = sizeof(struct lpfc_mbx_set_host_data) -
7102 sizeof(struct lpfc_sli4_cfg_mhdr);
7103 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7104 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7105 LPFC_SLI4_MBX_EMBED);
7107 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7108 mbox->u.mqe.un.set_host_data.param_len =
7109 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7110 snprintf(mbox->u.mqe.un.set_host_data.data,
7111 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7112 "Linux %s v"LPFC_DRIVER_VERSION,
7113 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
}
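/**
 * lpfc_post_rq_buffer - Post buffers to a header/data receive queue pair
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue.
 * @drq: Pointer to the data receive queue.
 * @count: Number of receive buffers to post.
 * @idx: Index of the receive queue pair.
 *
 * Allocates up to @count receive buffers and posts their header and data
 * DMA addresses to @hrq and @drq as paired receive queue entries.
 */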
7117 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7118 struct lpfc_queue *drq, int count, int idx)
7121 struct lpfc_rqe hrqe;
7122 struct lpfc_rqe drqe;
7123 struct lpfc_rqb *rqbp;
7124 unsigned long flags;
7125 struct rqb_dmabuf *rqb_buffer;
7126 LIST_HEAD(rqb_buf_list);
7128 spin_lock_irqsave(&phba->hbalock, flags);
rqbp = hrq->rqbp;
7130 for (i = 0; i < count; i++) {
7131 /* If RQ is already full, don't bother */
7132 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
break;
7134 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
if (!rqb_buffer)
break;
7137 rqb_buffer->hrq = hrq;
7138 rqb_buffer->drq = drq;
7139 rqb_buffer->idx = idx;
7140 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
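/* Post each prepared buffer's header and data DMA addresses to the
 * hardware RQ pair.
 */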
7142 while (!list_empty(&rqb_buf_list)) {
7143 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7146 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7147 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7148 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7149 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7150 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7153 "6421 Cannot post to HRQ %d: %x %x %x "
7161 rqbp->rqb_free_buffer(phba, rqb_buffer);
7163 list_add_tail(&rqb_buffer->hbuf.list,
7164 &rqbp->rqb_buffer_list);
7165 rqbp->buffer_count++;
7168 spin_unlock_irqrestore(&phba->hbalock, flags);
7173 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7174 * @phba: Pointer to HBA context object.
7176 * This function is the main SLI4 device initialization function. It is
7177 * called by the HBA initialization code, HBA reset code and the HBA
7178 * error attention handler code. The caller is not required to hold any
* locks.
7182 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7185 LPFC_MBOXQ_t *mboxq;
7186 struct lpfc_mqe *mqe;
7189 uint32_t ftr_rsp = 0;
7190 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7191 struct lpfc_vport *vport = phba->pport;
7192 struct lpfc_dmabuf *mp;
7193 struct lpfc_rqb *rqbp;
7195 /* Perform a PCI function reset to start from clean */
7196 rc = lpfc_pci_function_reset(phba);
7200 /* Check the HBA Host Status Register for readiness */
7201 rc = lpfc_sli4_post_status_check(phba);
7205 spin_lock_irq(&phba->hbalock);
7206 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7207 spin_unlock_irq(&phba->hbalock);
7211 * Allocate a single mailbox container for initializing the
* port.
7214 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7218 /* Issue READ_REV to collect vpd and FW information. */
7219 vpd_size = SLI4_PAGE_SIZE;
7220 vpd = kzalloc(vpd_size, GFP_KERNEL);
7226 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7232 mqe = &mboxq->u.mqe;
7233 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7234 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7235 phba->hba_flag |= HBA_FCOE_MODE;
7236 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7238 phba->hba_flag &= ~HBA_FCOE_MODE;
7241 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7243 phba->hba_flag |= HBA_FIP_SUPPORT;
7245 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7247 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7249 if (phba->sli_rev != LPFC_SLI_REV4) {
7250 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7251 "0376 READ_REV Error. SLI Level %d "
7252 "FCoE enabled %d\n",
7253 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7260 * Continue initialization with default values even if driver failed
7261 * to read FCoE param config regions; only read parameters if the
* board is FCoE.
7264 if (phba->hba_flag & HBA_FCOE_MODE &&
7265 lpfc_sli4_read_fcoe_params(phba))
7266 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7267 "2570 Failed to read FCoE parameters\n");
7270 * Retrieve the sli4 device physical port name; failure to do so
7271 * is considered non-fatal.
7273 rc = lpfc_sli4_retrieve_pport_name(phba);
7275 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7276 "3080 Successful retrieving SLI4 device "
7277 "physical port name: %s.\n", phba->Port);
7280 * Evaluate the read rev and vpd data. Populate the driver
7281 * state with the results. If this routine fails, the failure
7282 * is not fatal as the driver will use generic values.
7284 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7285 if (unlikely(!rc)) {
7286 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7287 "0377 Error %d parsing vpd. "
7288 "Using defaults.\n", rc);
7293 /* Save information as VPD data */
7294 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7295 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7298 * This is because the first G7 ASIC doesn't support the standard
7299 * 0x5a NVME cmd descriptor type/subtype
7301 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7302 LPFC_SLI_INTF_IF_TYPE_6) &&
7303 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7304 (phba->vpd.rev.smRev == 0) &&
7305 (phba->cfg_nvme_embed_cmd == 1))
7306 phba->cfg_nvme_embed_cmd = 0;
7308 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7309 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7311 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7313 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7315 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7317 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7318 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7319 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7320 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7321 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7322 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7323 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7324 "(%d):0380 READ_REV Status x%x "
7325 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7326 mboxq->vport ? mboxq->vport->vpi : 0,
7327 bf_get(lpfc_mqe_status, mqe),
7328 phba->vpd.rev.opFwName,
7329 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7330 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7332 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
7333 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7334 if (phba->pport->cfg_lun_queue_depth > rc) {
7335 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7336 "3362 LUN queue depth changed from %d to %d\n",
7337 phba->pport->cfg_lun_queue_depth, rc);
7338 phba->pport->cfg_lun_queue_depth = rc;
7341 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7342 LPFC_SLI_INTF_IF_TYPE_0) {
7343 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7344 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7345 if (rc == MBX_SUCCESS) {
7346 phba->hba_flag |= HBA_RECOVERABLE_UE;
7347 /* Set 1Sec interval to detect UE */
7348 phba->eratt_poll_interval = 1;
7349 phba->sli4_hba.ue_to_sr = bf_get(
7350 lpfc_mbx_set_feature_UESR,
7351 &mboxq->u.mqe.un.set_feature);
7352 phba->sli4_hba.ue_to_rp = bf_get(
7353 lpfc_mbx_set_feature_UERP,
7354 &mboxq->u.mqe.un.set_feature);
7358 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7359 /* Enable MDS Diagnostics only if the SLI Port supports it */
7360 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7361 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7362 if (rc != MBX_SUCCESS)
7363 phba->mds_diags_support = 0;
7367 * Discover the port's supported feature set and match it against the
* host's requirements.
7370 lpfc_request_features(phba, mboxq);
7371 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7378 * The port must support FCP initiator mode as this is the
7379 * only mode running in the host.
7381 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7382 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7383 "0378 No support for fcpi mode.\n");
7387 /* Performance Hints are ONLY for FCoE */
7388 if (phba->hba_flag & HBA_FCOE_MODE) {
7389 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7390 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7392 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7396 * If the port cannot support the host's requested features
7397 * then turn off the global config parameters to disable the
7398 * feature in the driver. This is not a fatal error.
7400 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7401 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7402 phba->cfg_enable_bg = 0;
7403 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7408 if (phba->max_vpi && phba->cfg_enable_npiv &&
7409 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
ftr_rsp++;

if (ftr_rsp) {
7413 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7414 "0379 Feature Mismatch Data: x%08x %08x "
7415 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7416 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7417 phba->cfg_enable_npiv, phba->max_vpi);
7418 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7419 phba->cfg_enable_bg = 0;
7420 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7421 phba->cfg_enable_npiv = 0;
7424 /* These SLI3 features are assumed in SLI4 */
7425 spin_lock_irq(&phba->hbalock);
7426 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7427 spin_unlock_irq(&phba->hbalock);
7430 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7431 * calls depend on these resources to complete port setup.
7433 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7435 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7436 "2920 Failed to alloc Resource IDs "
7441 lpfc_set_host_data(phba, mboxq);
7443 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7445 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7446 "2134 Failed to set host os driver version %x",
7450 /* Read the port's service parameters. */
7451 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7453 phba->link_state = LPFC_HBA_ERROR;
7458 mboxq->vport = vport;
7459 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7460 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7461 if (rc == MBX_SUCCESS) {
7462 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7467 * This memory was allocated by the lpfc_read_sparam routine. Release
7468 * it to the mbuf pool.
7470 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7472 mboxq->ctx_buf = NULL;
7474 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7475 "0382 READ_SPARAM command failed "
7476 "status %d, mbxStatus x%x\n",
7477 rc, bf_get(lpfc_mqe_status, mqe));
7478 phba->link_state = LPFC_HBA_ERROR;
7483 lpfc_update_vport_wwn(vport);
7485 /* Update the fc_host data structures with new wwn. */
7486 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7487 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7489 /* Create all the SLI4 queues */
7490 rc = lpfc_sli4_queue_create(phba);
7492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7493 "3089 Failed to allocate queues\n");
7497 /* Set up all the queues to the device */
7498 rc = lpfc_sli4_queue_setup(phba);
7500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7501 "0381 Error %d during queue setup.\n ", rc);
7502 goto out_stop_timers;
7504 /* Initialize the driver internal SLI layer lists. */
7505 lpfc_sli4_setup(phba);
7506 lpfc_sli4_queue_init(phba);
7508 /* update host els xri-sgl sizes and mappings */
7509 rc = lpfc_sli4_els_sgl_update(phba);
7511 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7512 "1400 Failed to update xri-sgl size and "
7513 "mapping: %d\n", rc);
7514 goto out_destroy_queue;
7517 /* register the els sgl pool to the port */
7518 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7519 phba->sli4_hba.els_xri_cnt);
7520 if (unlikely(rc < 0)) {
7521 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7522 "0582 Error %d during els sgl post "
7525 goto out_destroy_queue;
7527 phba->sli4_hba.els_xri_cnt = rc;
7529 if (phba->nvmet_support) {
7530 /* update host nvmet xri-sgl sizes and mappings */
7531 rc = lpfc_sli4_nvmet_sgl_update(phba);
7533 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7534 "6308 Failed to update nvmet-sgl size "
7535 "and mapping: %d\n", rc);
7536 goto out_destroy_queue;
7539 /* register the nvmet sgl pool to the port */
7540 rc = lpfc_sli4_repost_sgl_list(
7542 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7543 phba->sli4_hba.nvmet_xri_cnt);
7544 if (unlikely(rc < 0)) {
7545 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7546 "3117 Error %d during nvmet "
7549 goto out_destroy_queue;
7551 phba->sli4_hba.nvmet_xri_cnt = rc;
7553 cnt = phba->cfg_iocb_cnt * 1024;
7554 /* We need 1 iocbq for every SGL, for IO processing */
7555 cnt += phba->sli4_hba.nvmet_xri_cnt;
7557 /* update host common xri-sgl sizes and mappings */
7558 rc = lpfc_sli4_common_sgl_update(phba);
7560 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7561 "6082 Failed to update nvme-sgl size "
7562 "and mapping: %d\n", rc);
7563 goto out_destroy_queue;
7566 /* register the allocated common sgl pool to the port */
7567 rc = lpfc_sli4_repost_common_sgl_list(phba);
7569 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7570 "6116 Error %d during nvme sgl post "
7572 /* Some NVME buffers were moved to abort nvme list */
7573 /* A pci function reset will repost them */
7575 goto out_destroy_queue;
7577 cnt = phba->cfg_iocb_cnt * 1024;
7580 if (!phba->sli.iocbq_lookup) {
7581 /* Initialize and populate the iocb list per host */
7582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7583 "2821 initialize iocb list %d total %d\n",
7584 phba->cfg_iocb_cnt, cnt);
7585 rc = lpfc_init_iocb_list(phba, cnt);
7587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7588 "1413 Failed to init iocb list.\n");
7589 goto out_destroy_queue;
7593 if (phba->nvmet_support)
7594 lpfc_nvmet_create_targetport(phba);
7596 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7597 /* Post initial buffers to all RQs created */
7598 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7599 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7600 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7601 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7602 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7603 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7604 rqbp->buffer_count = 0;
7606 lpfc_post_rq_buffer(
7607 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7608 phba->sli4_hba.nvmet_mrq_data[i],
7609 phba->cfg_nvmet_mrq_post, i);
7613 /* Post the rpi header region to the device. */
7614 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7616 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7617 "0393 Error %d during rpi post operation\n",
7620 goto out_destroy_queue;
7622 lpfc_sli4_node_prep(phba);
7624 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7625 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7627 * The FC Port needs to register FCFI (index 0)
7629 lpfc_reg_fcfi(phba, mboxq);
7630 mboxq->vport = phba->pport;
7631 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7632 if (rc != MBX_SUCCESS)
7633 goto out_unset_queue;
7635 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7636 &mboxq->u.mqe.un.reg_fcfi);
7638 /* We are in NVME Target mode with MRQ > 1 */
7640 /* First register the FCFI */
7641 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7642 mboxq->vport = phba->pport;
7643 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7644 if (rc != MBX_SUCCESS)
7645 goto out_unset_queue;
7647 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7648 &mboxq->u.mqe.un.reg_fcfi_mrq);
7650 /* Next register the MRQs */
7651 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7652 mboxq->vport = phba->pport;
7653 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7654 if (rc != MBX_SUCCESS)
7655 goto out_unset_queue;
7658 /* Check if the port is configured to be disabled */
7659 lpfc_sli_read_link_ste(phba);
7662 /* Arm the CQs and then EQs on device */
7663 lpfc_sli4_arm_cqeq_intr(phba);
7665 /* Indicate device interrupt mode */
7666 phba->sli4_hba.intr_enable = 1;
7668 /* Allow asynchronous mailbox command to go through */
7669 spin_lock_irq(&phba->hbalock);
7670 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7671 spin_unlock_irq(&phba->hbalock);
7673 /* Post receive buffers to the device */
7674 lpfc_sli4_rb_setup(phba);
7676 /* Reset HBA FCF states after HBA reset */
7677 phba->fcf.fcf_flag = 0;
7678 phba->fcf.current_rec.flag = 0;
7680 /* Start the ELS watchdog timer */
7681 mod_timer(&vport->els_tmofunc,
7682 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7684 /* Start heart beat timer */
7685 mod_timer(&phba->hb_tmofunc,
7686 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7687 phba->hb_outstanding = 0;
7688 phba->last_completion_time = jiffies;
7690 /* Start error attention (ERATT) polling timer */
7691 mod_timer(&phba->eratt_poll,
7692 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7694 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7695 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7696 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7698 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7699 "2829 This device supports "
7700 "Advanced Error Reporting (AER)\n");
7701 spin_lock_irq(&phba->hbalock);
7702 phba->hba_flag |= HBA_AER_ENABLED;
7703 spin_unlock_irq(&phba->hbalock);
7705 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7706 "2830 This device does not support "
7707 "Advanced Error Reporting (AER)\n");
7708 phba->cfg_aer_support = 0;
7714 * The port is ready, set the host's link state to LINK_DOWN
7715 * in preparation for link interrupts.
7717 spin_lock_irq(&phba->hbalock);
7718 phba->link_state = LPFC_LINK_DOWN;
7720 /* Check if physical ports are trunked */
7721 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7722 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7723 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7724 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7725 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7726 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7727 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7728 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7729 spin_unlock_irq(&phba->hbalock);
7731 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7732 (phba->hba_flag & LINK_DISABLED)) {
7733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7734 "3103 Adapter Link is disabled.\n");
7735 lpfc_down_link(phba, mboxq);
7736 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7737 if (rc != MBX_SUCCESS) {
7738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7739 "3104 Adapter failed to issue "
7740 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7741 goto out_unset_queue;
7743 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7744 /* don't perform init_link on SLI4 FC port loopback test */
7745 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7746 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7748 goto out_unset_queue;
7751 mempool_free(mboxq, phba->mbox_mem_pool);
7754 /* Unset all the queues set up in this routine when error out */
7755 lpfc_sli4_queue_unset(phba);
7757 lpfc_free_iocb_list(phba);
7758 lpfc_sli4_queue_destroy(phba);
7760 lpfc_stop_hba_timers(phba);
7762 mempool_free(mboxq, phba->mbox_mem_pool);
7767 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7768 * @t: pointer to the timer_list embedded in the hba structure.
7770 * This is the callback function for mailbox timer. The mailbox
7771 * timer is armed when a new mailbox command is issued and the timer
7772 * is deleted when the mailbox completes. The function is called by
7773 * the kernel timer code when a mailbox does not complete within
7774 * expected time. This function wakes up the worker thread to
7775 * process the mailbox timeout and returns. All the processing is
7776 * done by the worker thread function lpfc_mbox_timeout_handler.
7779 lpfc_mbox_timeout(struct timer_list *t)
7781 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7782 unsigned long iflag;
7783 uint32_t tmo_posted;
7785 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7786 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7788 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7789 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7792 lpfc_worker_wake_up(phba);
7797 * lpfc_sli4_mbox_completions_pending - check for pending mailbox completions
7799 * @phba: Pointer to HBA context object.
7801 * This function checks if any mailbox completions are present on the mailbox
* completion queue.
7805 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7809 struct lpfc_queue *mcq;
7810 struct lpfc_mcqe *mcqe;
7811 bool pending_completions = false;
7814 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
return false;
7817 /* Check for completions on mailbox completion queue */
7819 mcq = phba->sli4_hba.mbx_cq;
7820 idx = mcq->hba_index;
7821 qe_valid = mcq->qe_valid;
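/* Walk the valid CQEs; a completed, non-async entry means a
 * mailbox completion is pending.
 */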
7822 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
7823 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7824 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7825 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7826 pending_completions = true;
7829 idx = (idx + 1) % mcq->entry_count;
7830 if (mcq->hba_index == idx)
break;
7833 /* if the index wrapped around, toggle the valid bit */
7834 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7835 qe_valid = (qe_valid) ? 0 : 1;
7837 return pending_completions;
7842 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7844 * @phba: Pointer to HBA context object.
7846 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7847 * may be missed, causing erroneous mailbox timeouts to occur. This function
7848 * checks to see if mbox completions are on the mailbox completion queue
7849 * and will process all the completions associated with the eq for the
7850 * mailbox completion queue.
7853 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7855 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7857 struct lpfc_queue *fpeq = NULL;
7858 struct lpfc_eqe *eqe;
7861 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
return false;
7864 /* Find the eq associated with the mcq */
7866 if (sli4_hba->hba_eq)
7867 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
7868 if (sli4_hba->hba_eq[eqidx]->queue_id ==
7869 sli4_hba->mbx_cq->assoc_qid) {
7870 fpeq = sli4_hba->hba_eq[eqidx];
7876 /* Turn off interrupts from this EQ */
7878 sli4_hba->sli4_eq_clr_intr(fpeq);
7880 /* Check to see if a mbox completion is pending */
7882 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7885 * If a mbox completion is pending, process all the events on EQ
7886 * associated with the mbox completion queue (this could include
7887 * mailbox commands, async events, els commands, receive queue data
* and fcp commands)
if (mbox_pending)
7892 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7893 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7894 fpeq->EQ_processed++;
7897 /* Always clear and re-arm the EQ */
7899 sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7901 return mbox_pending;
7906 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7907 * @phba: Pointer to HBA context object.
7909 * This function is called from worker thread when a mailbox command times out.
7910 * The caller is not required to hold any locks. This function will reset the
7911 * HBA and recover all the pending commands.
7914 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7916 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7917 MAILBOX_t *mb = NULL;
7919 struct lpfc_sli *psli = &phba->sli;
7921 /* If the mailbox completed, process the completion and return */
7922 if (lpfc_sli4_process_missed_mbox_completions(phba))
return;
7927 /* Check the pmbox pointer first. There is a race condition
7928 * between the mbox timeout handler getting executed in the
7929 * worklist and the mailbox actually completing. When this
7930 * race condition occurs, the mbox_active will be NULL.
7932 spin_lock_irq(&phba->hbalock);
7933 if (pmbox == NULL) {
7934 lpfc_printf_log(phba, KERN_WARNING,
7936 "0353 Active Mailbox cleared - mailbox timeout "
7938 spin_unlock_irq(&phba->hbalock);
7942 /* Mbox cmd <mbxCommand> timeout */
7943 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7944 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7946 phba->pport->port_state,
7948 phba->sli.mbox_active);
7949 spin_unlock_irq(&phba->hbalock);
7951 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7952 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7953 * it to fail all outstanding SCSI IO.
7955 spin_lock_irq(&phba->pport->work_port_lock);
7956 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7957 spin_unlock_irq(&phba->pport->work_port_lock);
7958 spin_lock_irq(&phba->hbalock);
7959 phba->link_state = LPFC_LINK_UNKNOWN;
7960 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7961 spin_unlock_irq(&phba->hbalock);
7963 lpfc_sli_abort_fcp_rings(phba);
7965 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7966 "0345 Resetting board due to mailbox timeout\n");
7968 /* Reset the HBA device */
7969 lpfc_reset_hba(phba);
7973 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7974 * @phba: Pointer to HBA context object.
7975 * @pmbox: Pointer to mailbox object.
7976 * @flag: Flag indicating how the mailbox need to be processed.
7978 * This function is called by discovery code and HBA management code
7979 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7980 * function gets the hbalock to protect the data structures.
7981 * The mailbox command can be submitted in polling mode, in which case
7982 * this function will wait in a polling loop for the completion of the
* mailbox command.
7984 * If the mailbox is submitted in no_wait mode (not polling) the
7985 * function will submit the command and returns immediately without waiting
7986 * for the mailbox completion. The no_wait is supported only when HBA
7987 * is in SLI2/SLI3 mode - interrupts are enabled.
7988 * The SLI interface allows only one mailbox pending at a time. If the
7989 * mailbox is issued in polling mode and there is already a mailbox
7990 * pending, then the function will return an error. If the mailbox is issued
7991 * in NO_WAIT mode and there is a mailbox pending already, the function
7992 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
7993 * The sli layer owns the mailbox object until the completion of mailbox
7994 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
7995 * return codes, the caller owns the mailbox command after the return of
* the function.
7999 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
uint32_t flag)
{
MAILBOX_t *mbx;
8003 struct lpfc_sli *psli = &phba->sli;
8004 uint32_t status, evtctr;
8005 uint32_t ha_copy, hc_copy;
8007 unsigned long timeout;
8008 unsigned long drvr_flag = 0;
8009 uint32_t word0, ldata;
8010 void __iomem *to_slim;
8011 int processing_queue = 0;
8013 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8015 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8016 /* processing mbox queue from intr_handler */
8017 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8018 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8021 processing_queue = 1;
8022 pmbox = lpfc_mbox_get(phba);
8024 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8029 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8030 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
if (!pmbox->vport) {
8032 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8033 lpfc_printf_log(phba, KERN_ERR,
8034 LOG_MBOX | LOG_VPORT,
8035 "1806 Mbox x%x failed. No vport\n",
8036 pmbox->u.mb.mbxCommand);
8038 goto out_not_finished;
8042 /* If the PCI channel is in offline state, do not post mbox. */
8043 if (unlikely(pci_channel_offline(phba->pcidev))) {
8044 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8045 goto out_not_finished;
8048 /* If HBA has a deferred error attention, fail the command. */
8049 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8050 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8051 goto out_not_finished;
mbx = &pmbox->u.mb;
8057 status = MBX_SUCCESS;
8059 if (phba->link_state == LPFC_HBA_ERROR) {
8060 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8062 /* Mbox command <mbxCommand> cannot issue */
8063 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8064 "(%d):0311 Mailbox command x%x cannot "
8065 "issue Data: x%x x%x\n",
8066 pmbox->vport ? pmbox->vport->vpi : 0,
8067 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8068 goto out_not_finished;
8071 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8072 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8073 !(hc_copy & HC_MBINT_ENA)) {
8074 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8075 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8076 "(%d):2528 Mailbox command x%x cannot "
8077 "issue Data: x%x x%x\n",
8078 pmbox->vport ? pmbox->vport->vpi : 0,
8079 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8080 goto out_not_finished;
8084 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8085 /* Polling for an mbox command when another one is already active
8086 * is not allowed in SLI. Also, the driver must have established
8087 * SLI2 mode to queue and process multiple mbox commands. */
8090 if (flag & MBX_POLL) {
8091 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8093 /* Mbox command <mbxCommand> cannot issue */
8094 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8095 "(%d):2529 Mailbox command x%x "
8096 "cannot issue Data: x%x x%x\n",
8097 pmbox->vport ? pmbox->vport->vpi : 0,
8098 pmbox->u.mb.mbxCommand,
8099 psli->sli_flag, flag);
8100 goto out_not_finished;
8103 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8104 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8105 /* Mbox command <mbxCommand> cannot issue */
8106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8107 "(%d):2530 Mailbox command x%x "
8108 "cannot issue Data: x%x x%x\n",
8109 pmbox->vport ? pmbox->vport->vpi : 0,
8110 pmbox->u.mb.mbxCommand,
8111 psli->sli_flag, flag);
8112 goto out_not_finished;
8115 /* Another mailbox command is still being processed, queue this
8116 * command to be processed later. */
8118 lpfc_mbox_put(phba, pmbox);
8120 /* Mbox cmd issue - BUSY */
8121 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8122 "(%d):0308 Mbox cmd issue - BUSY Data: "
8123 "x%x x%x x%x x%x\n",
8124 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8126 phba->pport ? phba->pport->port_state : 0xff,
8127 psli->sli_flag, flag);
8129 psli->slistat.mbox_busy++;
8130 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8133 lpfc_debugfs_disc_trc(pmbox->vport,
8134 LPFC_DISC_TRC_MBOX_VPORT,
8135 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8136 (uint32_t)mbx->mbxCommand,
8137 mbx->un.varWords[0], mbx->un.varWords[1]);
8140 lpfc_debugfs_disc_trc(phba->pport,
8142 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8143 (uint32_t)mbx->mbxCommand,
8144 mbx->un.varWords[0], mbx->un.varWords[1]);
8150 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8152 /* If we are not polling, we MUST be in SLI2 mode */
8153 if (flag != MBX_POLL) {
8154 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8155 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8156 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8157 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8158 /* Mbox command <mbxCommand> cannot issue */
8159 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8160 "(%d):2531 Mailbox command x%x "
8161 "cannot issue Data: x%x x%x\n",
8162 pmbox->vport ? pmbox->vport->vpi : 0,
8163 pmbox->u.mb.mbxCommand,
8164 psli->sli_flag, flag);
8165 goto out_not_finished;
8167 /* timeout active mbox command */
8168 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8170 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8173 /* Mailbox cmd <cmd> issue */
8174 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8175 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8177 pmbox->vport ? pmbox->vport->vpi : 0,
8179 phba->pport ? phba->pport->port_state : 0xff,
8180 psli->sli_flag, flag);
8182 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8184 lpfc_debugfs_disc_trc(pmbox->vport,
8185 LPFC_DISC_TRC_MBOX_VPORT,
8186 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8187 (uint32_t)mbx->mbxCommand,
8188 mbx->un.varWords[0], mbx->un.varWords[1]);
8191 lpfc_debugfs_disc_trc(phba->pport,
8193 "MBOX Send: cmd:x%x mb:x%x x%x",
8194 (uint32_t)mbx->mbxCommand,
8195 mbx->un.varWords[0], mbx->un.varWords[1]);
8199 psli->slistat.mbox_cmd++;
8200 evtctr = psli->slistat.mbox_event;
8202 /* next set own bit for the adapter and copy over command word */
8203 mbx->mbxOwner = OWN_CHIP;
8205 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8206 /* Populate mbox extension offset word. */
8207 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8208 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8209 = (uint8_t *)phba->mbox_ext
8210 - (uint8_t *)phba->mbox;
8213 /* Copy the mailbox extension data */
8214 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8215 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8216 (uint8_t *)phba->mbox_ext,
8217 pmbox->in_ext_byte_len);
8219 /* Copy command data to host SLIM area */
8220 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8222 /* Populate mbox extension offset word. */
8223 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8224 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8225 = MAILBOX_HBA_EXT_OFFSET;
8227 /* Copy the mailbox extension data */
8228 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8229 lpfc_memcpy_to_slim(phba->MBslimaddr +
8230 MAILBOX_HBA_EXT_OFFSET,
8231 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8233 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8234 /* copy command data into host mbox for cmpl */
8235 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8238 /* First copy mbox command data to HBA SLIM, skip past first word */
8240 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8241 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8242 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8244 /* Next copy over first word, with mbxOwner set */
8245 ldata = *((uint32_t *)mbx);
8246 to_slim = phba->MBslimaddr;
8247 writel(ldata, to_slim);
8248 readl(to_slim); /* flush */
8250 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8251 /* switch over to host mailbox */
8252 psli->sli_flag |= LPFC_SLI_ACTIVE;
8259 /* Set up reference to mailbox command */
8260 psli->mbox_active = pmbox;
8261 /* Interrupt board to do it */
8262 writel(CA_MBATT, phba->CAregaddr);
8263 readl(phba->CAregaddr); /* flush */
8264 /* Don't wait for it to finish, just return */
8268 /* Set up null reference to mailbox command */
8269 psli->mbox_active = NULL;
8270 /* Interrupt board to do it */
8271 writel(CA_MBATT, phba->CAregaddr);
8272 readl(phba->CAregaddr); /* flush */
8274 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8275 /* First read mbox status word */
8276 word0 = *((uint32_t *)phba->mbox);
8277 word0 = le32_to_cpu(word0);
8279 /* First read mbox status word */
8280 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8281 spin_unlock_irqrestore(&phba->hbalock,
8283 goto out_not_finished;
8287 /* Read the HBA Host Attention Register */
8288 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8289 spin_unlock_irqrestore(&phba->hbalock,
8291 goto out_not_finished;
8293 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8296 /* Wait for command to complete */
8297 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8298 (!(ha_copy & HA_MBATT) &&
8299 (phba->link_state > LPFC_WARM_START))) {
8300 if (time_after(jiffies, timeout)) {
8301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8302 spin_unlock_irqrestore(&phba->hbalock,
8304 goto out_not_finished;
8307 /* Check if we took a mbox interrupt while we were polling */
8309 if (((word0 & OWN_CHIP) != OWN_CHIP)
8310 && (evtctr != psli->slistat.mbox_event))
8314 spin_unlock_irqrestore(&phba->hbalock,
8317 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8320 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8321 /* First copy command data */
8322 word0 = *((uint32_t *)phba->mbox);
8323 word0 = le32_to_cpu(word0);
8324 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8327 /* Check real SLIM for any errors */
8328 slimword0 = readl(phba->MBslimaddr);
8329 slimmb = (MAILBOX_t *) & slimword0;
8330 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8331 && slimmb->mbxStatus) {
8338 /* First copy command data */
8339 word0 = readl(phba->MBslimaddr);
8341 /* Read the HBA Host Attention Register */
8342 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8343 spin_unlock_irqrestore(&phba->hbalock,
8345 goto out_not_finished;
8349 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8350 /* copy results back to user */
8351 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8353 /* Copy the mailbox extension data */
8354 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8355 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8357 pmbox->out_ext_byte_len);
8360 /* First copy command data */
8361 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8363 /* Copy the mailbox extension data */
8364 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8365 lpfc_memcpy_from_slim(
8368 MAILBOX_HBA_EXT_OFFSET,
8369 pmbox->out_ext_byte_len);
8373 writel(HA_MBATT, phba->HAregaddr);
8374 readl(phba->HAregaddr); /* flush */
8376 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8377 status = mbx->mbxStatus;
8380 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8384 if (processing_queue) {
8385 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8386 lpfc_mbox_cmpl_put(phba, pmbox);
8388 return MBX_NOT_FINISHED;
8392 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8393 * @phba: Pointer to HBA context object.
8395 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8396 * the driver internal pending mailbox queue. It will then try to wait out
8397 * any outstanding mailbox command before returning.
8400 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8401 * the outstanding mailbox command timed out.
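 *
 * A minimal usage sketch (illustrative, mirroring the synchronous path
 * in lpfc_sli_issue_mbox_s4() below):
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}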
8404 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8406 struct lpfc_sli *psli = &phba->sli;
8408 unsigned long timeout = 0;
8410 /* Mark the asynchronous mailbox command posting as blocked */
8411 spin_lock_irq(&phba->hbalock);
8412 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8413 /* Determine how long we might wait for the active mailbox
8414 * command to be gracefully completed by firmware. */
8416 if (phba->sli.mbox_active)
8417 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8418 phba->sli.mbox_active) *
8420 spin_unlock_irq(&phba->hbalock);
8422 /* Make sure the mailbox is really active */
8424 lpfc_sli4_process_missed_mbox_completions(phba);
8426 /* Wait for the outstanding mailbox command to complete */
8427 while (phba->sli.mbox_active) {
8428 /* Check active mailbox complete status every 2ms */
8430 if (time_after(jiffies, timeout)) {
8431 /* Timeout, mark the outstanding cmd as not complete */
8437 /* Cannot cleanly block async mailbox command, fail it */
8439 spin_lock_irq(&phba->hbalock);
8440 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8441 spin_unlock_irq(&phba->hbalock);
8447 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8448 * @phba: Pointer to HBA context object.
8450 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8451 * commands from the driver internal pending mailbox queue. It makes sure
8452 * that there is no outstanding mailbox command before resuming posting
8453 * asynchronous mailbox commands. If, for any reason, there is an
8454 * outstanding mailbox command, it will try to wait it out before resuming
8455 * asynchronous mailbox command posting.
8458 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8460 struct lpfc_sli *psli = &phba->sli;
8462 spin_lock_irq(&phba->hbalock);
8463 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8464 /* Asynchronous mailbox posting is not blocked, do nothing */
8465 spin_unlock_irq(&phba->hbalock);
8469 /* The outstanding synchronous mailbox command is guaranteed to be done,
8470 * either successfully or by timeout; after timing out, the outstanding
8471 * mailbox command is always removed. So just unblock posting of async
8472 * mailbox commands and resume. */
8474 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8475 spin_unlock_irq(&phba->hbalock);
8477 /* wake up worker thread to post asynchronous mailbox command */
8478 lpfc_worker_wake_up(phba);
8482 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8483 * @phba: Pointer to HBA context object.
8484 * @mboxq: Pointer to mailbox object.
8486 * The function waits for the bootstrap mailbox register ready bit from
8487 * the port for twice the regular mailbox command timeout value.
8489 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8490 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
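 *
 * Illustrative caller-side sketch (error handling abbreviated): every
 * bootstrap register write is bracketed by a readiness wait, as in
 * lpfc_sli4_post_sync_mbox() below:
 *
 *	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 *	if (rc)
 *		goto exit;
 *	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);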
8493 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8496 unsigned long timeout;
8497 struct lpfc_register bmbx_reg;
8499 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8503 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8504 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8508 if (time_after(jiffies, timeout))
8509 return MBXERR_ERROR;
8510 } while (!db_ready);
8516 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8517 * @phba: Pointer to HBA context object.
8518 * @mboxq: Pointer to mailbox object.
8520 * The function posts a mailbox to the port. The mailbox is expected
8521 * to be completely filled in and ready for the port to operate on it.
8522 * This routine executes a synchronous completion operation on the
8523 * mailbox by polling for its completion.
8525 * The caller must not be holding any locks when calling this routine.
8528 * MBX_SUCCESS - mailbox posted successfully
8529 * Any of the MBX error values.
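 *
 * Illustrative call sketch (not verbatim from a caller); on failure the
 * MQE/MCQE status fields left in @mboxq describe the error:
 *
 *	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *	if (rc != MBX_SUCCESS)
 *		status = bf_get(lpfc_mqe_status, &mboxq->u.mqe);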
8532 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8534 int rc = MBX_SUCCESS;
8535 unsigned long iflag;
8536 uint32_t mcqe_status;
8538 struct lpfc_sli *psli = &phba->sli;
8539 struct lpfc_mqe *mb = &mboxq->u.mqe;
8540 struct lpfc_bmbx_create *mbox_rgn;
8541 struct dma_address *dma_address;
8544 /* Only one mailbox can be active to the bootstrap mailbox region
8545 * at a time and there is no queueing provided. */
8547 spin_lock_irqsave(&phba->hbalock, iflag);
8548 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8549 spin_unlock_irqrestore(&phba->hbalock, iflag);
8550 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8551 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8552 "cannot issue Data: x%x x%x\n",
8553 mboxq->vport ? mboxq->vport->vpi : 0,
8554 mboxq->u.mb.mbxCommand,
8555 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8556 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8557 psli->sli_flag, MBX_POLL);
8558 return MBXERR_ERROR;
8560 /* This routine grabs the token and owns it until release */
8561 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8562 phba->sli.mbox_active = mboxq;
8563 spin_unlock_irqrestore(&phba->hbalock, iflag);
8565 /* wait for the bootstrap mbox register to become ready */
8566 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8571 /* Initialize the bootstrap memory region to avoid stale data areas
8572 * in the mailbox post. Then copy the caller's mailbox contents to
8573 * the bmbx mailbox region. */
8575 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8576 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8577 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8578 sizeof(struct lpfc_mqe));
8580 /* Post the high mailbox dma address to the port and wait for ready. */
8581 dma_address = &phba->sli4_hba.bmbx.dma_address;
8582 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8584 /* wait for bootstrap mbox register for hi-address write done */
8585 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8589 /* Post the low mailbox dma address to the port. */
8590 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8592 /* wait for bootstrap mbox register for low address write done */
8593 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8598 /* Read the CQ to ensure the mailbox has completed.
8599 * If so, update the mailbox status so that the upper layers
8600 * can complete the request normally. */
8602 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8603 sizeof(struct lpfc_mqe));
8604 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8605 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8606 sizeof(struct lpfc_mcqe));
8607 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8609 /* When the CQE status indicates a failure and the mailbox status
8610 * indicates success then copy the CQE status into the mailbox status
8611 * (and prefix it with x4000). */
8613 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8614 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8615 bf_set(lpfc_mqe_status, mb,
8616 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8619 lpfc_sli4_swap_str(phba, mboxq);
8621 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8622 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8623 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8624 " x%x x%x CQ: x%x x%x x%x x%x\n",
8625 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8626 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8627 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8628 bf_get(lpfc_mqe_status, mb),
8629 mb->un.mb_words[0], mb->un.mb_words[1],
8630 mb->un.mb_words[2], mb->un.mb_words[3],
8631 mb->un.mb_words[4], mb->un.mb_words[5],
8632 mb->un.mb_words[6], mb->un.mb_words[7],
8633 mb->un.mb_words[8], mb->un.mb_words[9],
8634 mb->un.mb_words[10], mb->un.mb_words[11],
8635 mb->un.mb_words[12], mboxq->mcqe.word0,
8636 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8637 mboxq->mcqe.trailer);
8639 /* We are holding the token, no lock needed when releasing it */
8640 spin_lock_irqsave(&phba->hbalock, iflag);
8641 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8642 phba->sli.mbox_active = NULL;
8643 spin_unlock_irqrestore(&phba->hbalock, iflag);
8648 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8649 * @phba: Pointer to HBA context object.
8650 * @pmbox: Pointer to mailbox object.
8651 * @flag: Flag indicating how the mailbox needs to be processed.
8653 * This function is called by discovery code and HBA management code to submit
8654 * a mailbox command to firmware with SLI-4 interface spec.
8656 * Return codes the caller owns the mailbox command after the return of the function.
8660 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8663 struct lpfc_sli *psli = &phba->sli;
8664 unsigned long iflags;
8667 /* dump the issued mailbox command if idiag dump is set up */
8668 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8670 rc = lpfc_mbox_dev_check(phba);
8672 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8673 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8674 "cannot issue Data: x%x x%x\n",
8675 mboxq->vport ? mboxq->vport->vpi : 0,
8676 mboxq->u.mb.mbxCommand,
8677 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8678 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8679 psli->sli_flag, flag);
8680 goto out_not_finished;
8683 /* Detect polling mode and jump to a handler */
8684 if (!phba->sli4_hba.intr_enable) {
8685 if (flag == MBX_POLL)
8686 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8689 if (rc != MBX_SUCCESS)
8690 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8691 "(%d):2541 Mailbox command x%x "
8692 "(x%x/x%x) failure: "
8693 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8695 mboxq->vport ? mboxq->vport->vpi : 0,
8696 mboxq->u.mb.mbxCommand,
8697 lpfc_sli_config_mbox_subsys_get(phba,
8699 lpfc_sli_config_mbox_opcode_get(phba,
8701 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8702 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8703 bf_get(lpfc_mcqe_ext_status,
8705 psli->sli_flag, flag);
8707 } else if (flag == MBX_POLL) {
8708 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8709 "(%d):2542 Try to issue mailbox command "
8710 "x%x (x%x/x%x) synchronously ahead of async "
8711 "mailbox command queue: x%x x%x\n",
8712 mboxq->vport ? mboxq->vport->vpi : 0,
8713 mboxq->u.mb.mbxCommand,
8714 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8715 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8716 psli->sli_flag, flag);
8717 /* Try to block the asynchronous mailbox posting */
8718 rc = lpfc_sli4_async_mbox_block(phba);
8720 /* Successfully blocked, now issue sync mbox cmd */
8721 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8722 if (rc != MBX_SUCCESS)
8723 lpfc_printf_log(phba, KERN_WARNING,
8725 "(%d):2597 Sync Mailbox command "
8726 "x%x (x%x/x%x) failure: "
8727 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8729 mboxq->vport ? mboxq->vport->vpi : 0,
8730 mboxq->u.mb.mbxCommand,
8731 lpfc_sli_config_mbox_subsys_get(phba,
8733 lpfc_sli_config_mbox_opcode_get(phba,
8735 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8736 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8737 bf_get(lpfc_mcqe_ext_status,
8739 psli->sli_flag, flag);
8740 /* Unblock the async mailbox posting afterward */
8741 lpfc_sli4_async_mbox_unblock(phba);
8746 /* Now, interrupt mode asynchronous mailbox command */
8747 rc = lpfc_mbox_cmd_check(phba, mboxq);
8749 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8750 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8751 "cannot issue Data: x%x x%x\n",
8752 mboxq->vport ? mboxq->vport->vpi : 0,
8753 mboxq->u.mb.mbxCommand,
8754 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8755 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8756 psli->sli_flag, flag);
8757 goto out_not_finished;
8760 /* Put the mailbox command into the driver internal FIFO */
8761 psli->slistat.mbox_busy++;
8762 spin_lock_irqsave(&phba->hbalock, iflags);
8763 lpfc_mbox_put(phba, mboxq);
8764 spin_unlock_irqrestore(&phba->hbalock, iflags);
8765 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8766 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8767 "x%x (x%x/x%x) x%x x%x x%x\n",
8768 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8769 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8770 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8771 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8772 phba->pport->port_state,
8773 psli->sli_flag, MBX_NOWAIT);
8774 /* Wake up worker thread to transport mailbox command from head */
8775 lpfc_worker_wake_up(phba);
8780 return MBX_NOT_FINISHED;
8784 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8785 * @phba: Pointer to HBA context object.
8787 * This function is called by the worker thread to send a mailbox command to
8788 * SLI4 HBA firmware.
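 *
 * Illustrative sketch of the assumed worker-thread call site
 * (simplified): MBX_SUCCESS means one command was handed to the port;
 * the completion handler later wakes the worker to post the next one:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		lpfc_sli4_post_async_mbox(phba);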
8792 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8794 struct lpfc_sli *psli = &phba->sli;
8795 LPFC_MBOXQ_t *mboxq;
8796 int rc = MBX_SUCCESS;
8797 unsigned long iflags;
8798 struct lpfc_mqe *mqe;
8801 /* Check interrupt mode before posting an async mailbox command */
8802 if (unlikely(!phba->sli4_hba.intr_enable))
8803 return MBX_NOT_FINISHED;
8805 /* Check for mailbox command service token */
8806 spin_lock_irqsave(&phba->hbalock, iflags);
8807 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8808 spin_unlock_irqrestore(&phba->hbalock, iflags);
8809 return MBX_NOT_FINISHED;
8811 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8812 spin_unlock_irqrestore(&phba->hbalock, iflags);
8813 return MBX_NOT_FINISHED;
8815 if (unlikely(phba->sli.mbox_active)) {
8816 spin_unlock_irqrestore(&phba->hbalock, iflags);
8817 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8818 "0384 There is pending active mailbox cmd\n");
8819 return MBX_NOT_FINISHED;
8821 /* Take the mailbox command service token */
8822 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8824 /* Get the next mailbox command from head of queue */
8825 mboxq = lpfc_mbox_get(phba);
8827 /* If no more mailbox commands are waiting to be posted, we're done */
8829 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8830 spin_unlock_irqrestore(&phba->hbalock, iflags);
8833 phba->sli.mbox_active = mboxq;
8834 spin_unlock_irqrestore(&phba->hbalock, iflags);
8836 /* Check device readiness for posting mailbox command */
8837 rc = lpfc_mbox_dev_check(phba);
8839 /* Driver cleanup routine will clean up the pending mailbox */
8840 goto out_not_finished;
8842 /* Prepare the mbox command to be posted */
8843 mqe = &mboxq->u.mqe;
8844 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8846 /* Start timer for the mbox_tmo and log some mailbox post messages */
8847 mod_timer(&psli->mbox_tmo, (jiffies +
8848 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8850 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8851 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8853 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8854 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8855 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8856 phba->pport->port_state, psli->sli_flag);
8858 if (mbx_cmnd != MBX_HEARTBEAT) {
8860 lpfc_debugfs_disc_trc(mboxq->vport,
8861 LPFC_DISC_TRC_MBOX_VPORT,
8862 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8863 mbx_cmnd, mqe->un.mb_words[0],
8864 mqe->un.mb_words[1]);
8866 lpfc_debugfs_disc_trc(phba->pport,
8868 "MBOX Send: cmd:x%x mb:x%x x%x",
8869 mbx_cmnd, mqe->un.mb_words[0],
8870 mqe->un.mb_words[1]);
8873 psli->slistat.mbox_cmd++;
8875 /* Post the mailbox command to the port */
8876 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8877 if (rc != MBX_SUCCESS) {
8878 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8879 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8880 "cannot issue Data: x%x x%x\n",
8881 mboxq->vport ? mboxq->vport->vpi : 0,
8882 mboxq->u.mb.mbxCommand,
8883 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8884 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8885 psli->sli_flag, MBX_NOWAIT);
8886 goto out_not_finished;
8892 spin_lock_irqsave(&phba->hbalock, iflags);
8893 if (phba->sli.mbox_active) {
8894 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8895 __lpfc_mbox_cmpl_put(phba, mboxq);
8896 /* Release the token */
8897 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8898 phba->sli.mbox_active = NULL;
8900 spin_unlock_irqrestore(&phba->hbalock, iflags);
8902 return MBX_NOT_FINISHED;
8906 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8907 * @phba: Pointer to HBA context object.
8908 * @pmbox: Pointer to mailbox object.
8909 * @flag: Flag indicating how the mailbox needs to be processed.
8911 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8912 * the API jump table function pointer from the lpfc_hba struct.
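 *
 * Illustrative caller sketch (hedged; mailbox setup helpers vary by
 * command, lpfc_read_rev() is just one example). With MBX_POLL a
 * MBX_SUCCESS return means the command has completed, so the caller
 * owns the mailbox again and may free it:
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc == MBX_SUCCESS)
 *		mempool_free(pmb, phba->mbox_mem_pool);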
8914 * Return codes the caller owns the mailbox command after the return of the function.
8918 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8920 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8924 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8925 * @phba: The hba struct for which this call is being executed.
8926 * @dev_grp: The HBA PCI-Device group number.
8928 * This routine sets up the mbox interface API function jump table in @phba
8930 * Returns: 0 - success, -ENODEV - failure.
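 *
 * Illustrative sketch: called once during driver setup; afterwards
 * lpfc_sli_issue_mbox() dispatches through the installed pointer:
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;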
8933 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8937 case LPFC_PCI_DEV_LP:
8938 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8939 phba->lpfc_sli_handle_slow_ring_event =
8940 lpfc_sli_handle_slow_ring_event_s3;
8941 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8942 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8943 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8945 case LPFC_PCI_DEV_OC:
8946 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8947 phba->lpfc_sli_handle_slow_ring_event =
8948 lpfc_sli_handle_slow_ring_event_s4;
8949 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8950 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8951 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8955 "1420 Invalid HBA PCI-device group: 0x%x\n",
8964 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8965 * @phba: Pointer to HBA context object.
8966 * @pring: Pointer to driver SLI ring object.
8967 * @piocb: Pointer to address of newly added command iocb.
8969 * This function is called with hbalock held to add a command
8970 * iocb to the txq when SLI layer cannot submit the command iocb
8974 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8975 struct lpfc_iocbq *piocb)
8977 lockdep_assert_held(&phba->hbalock);
8978 /* Insert the caller's iocb in the txq tail for later processing. */
8979 list_add_tail(&piocb->list, &pring->txq);
8983 * lpfc_sli_next_iocb - Get the next iocb in the txq
8984 * @phba: Pointer to HBA context object.
8985 * @pring: Pointer to driver SLI ring object.
8986 * @piocb: Pointer to address of newly added command iocb.
8988 * This function is called with hbalock held before a new
8989 * iocb is submitted to the firmware. This function checks
8990 * txq to flush the iocbs in txq to Firmware before
8991 * submitting new iocbs to the Firmware.
8992 * If there are iocbs in the txq which need to be submitted
8993 * to firmware, lpfc_sli_next_iocb returns the first element
8994 * of the txq after dequeuing it from txq.
8995 * If there is no iocb in the txq then the function will return
8996 * *piocb and *piocb is set to NULL. Caller needs to check
8997 * *piocb to find if there are more commands in the txq.
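 *
 * The resulting drain pattern (taken from the SLI3 issue path below):
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);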
8999 static struct lpfc_iocbq *
9000 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9001 struct lpfc_iocbq **piocb)
9003 struct lpfc_iocbq * nextiocb;
9005 lockdep_assert_held(&phba->hbalock);
9007 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9017 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9018 * @phba: Pointer to HBA context object.
9019 * @ring_number: SLI ring number to issue iocb on.
9020 * @piocb: Pointer to command iocb.
9021 * @flag: Flag indicating if this command can be put into txq.
9023 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9024 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9025 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9026 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9027 * this function allows only iocbs for posting buffers. This function finds
9028 * next available slot in the command ring and posts the command to the
9029 * available slot and writes the port attention register to request HBA start
9030 * processing new iocb. If there is no slot available in the ring and
9031 * flag & SLI_IOCB_RET_IOCB is set, the function returns IOCB_BUSY;
9032 * otherwise the new iocb is added to the txq and IOCB_SUCCESS is returned.
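 *
 * Illustrative caller sketch (hedged):
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb, 0);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * With flag 0, a full ring queues the iocb to the txq and returns
 * IOCB_SUCCESS; with SLI_IOCB_RET_IOCB the caller gets IOCB_BUSY back
 * instead.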
9034 * This function is called with hbalock held. The function will return success
9035 * after it successfully submits the iocb to firmware or after adding it to the txq.
9039 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9040 struct lpfc_iocbq *piocb, uint32_t flag)
9042 struct lpfc_iocbq *nextiocb;
9044 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9046 lockdep_assert_held(&phba->hbalock);
9048 if (piocb->iocb_cmpl && (!piocb->vport) &&
9049 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9050 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9051 lpfc_printf_log(phba, KERN_ERR,
9052 LOG_SLI | LOG_VPORT,
9053 "1807 IOCB x%x failed. No vport\n",
9054 piocb->iocb.ulpCommand);
9060 /* If the PCI channel is in offline state, do not post iocbs. */
9061 if (unlikely(pci_channel_offline(phba->pcidev)))
9064 /* If HBA has a deferred error attention, fail the iocb. */
9065 if (unlikely(phba->hba_flag & DEFER_ERATT))
9069 /* We should never get an IOCB if we are in a < LINK_DOWN state */
9071 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9075 /* Check to see if we are blocking IOCB processing because of an
9076 * outstanding event. */
9078 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9081 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9083 /* Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9084 * can be issued if the link is not up. */
9086 switch (piocb->iocb.ulpCommand) {
9087 case CMD_GEN_REQUEST64_CR:
9088 case CMD_GEN_REQUEST64_CX:
9089 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9090 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9091 FC_RCTL_DD_UNSOL_CMD) ||
9092 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9093 MENLO_TRANSPORT_TYPE))
9097 case CMD_QUE_RING_BUF_CN:
9098 case CMD_QUE_RING_BUF64_CN:
9100 /* For IOCBs, like QUE_RING_BUF, that have no rsp ring
9101 * completion, iocb_cmpl MUST be 0. */
9103 if (piocb->iocb_cmpl)
9104 piocb->iocb_cmpl = NULL;
9106 case CMD_CREATE_XRI_CR:
9107 case CMD_CLOSE_XRI_CN:
9108 case CMD_CLOSE_XRI_CX:
9115 /* For FCP commands, we must be in a state where we can process link attention events. */
9118 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9119 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9123 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9124 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9125 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9128 lpfc_sli_update_ring(phba, pring);
9130 lpfc_sli_update_full_ring(phba, pring);
9133 return IOCB_SUCCESS;
9138 pring->stats.iocb_cmd_delay++;
9142 if (!(flag & SLI_IOCB_RET_IOCB)) {
9143 __lpfc_sli_ringtx_put(phba, pring, piocb);
9144 return IOCB_SUCCESS;
9151 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9152 * @phba: Pointer to HBA context object.
9153 * @piocb: Pointer to command iocb.
9154 * @sglq: Pointer to the scatter gather queue object.
9156 * This routine converts the bpl or bde that is in the IOCB
9157 * to a sgl list for the sli4 hardware. The physical address
9158 * of the bpl/bde is converted back to a virtual address.
9159 * If the IOCB contains a BPL then the list of BDE's is
9160 * converted to sli4_sge's. If the IOCB contains a single
9161 * BDE then it is converted to a single sli_sge.
9162 * The IOCB is still in CPU endianness, so the contents of
9163 * the bpl can be used without byte swapping.
9165 * Returns valid XRI = Success, NO_XRI = Failure.
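 *
 * Illustrative sketch of how the SLI4 issue path consumes this (see
 * __lpfc_sli_issue_iocb_s4() below):
 *
 *	if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
 *		return IOCB_ERROR;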
9168 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9169 struct lpfc_sglq *sglq)
9171 uint16_t xritag = NO_XRI;
9172 struct ulp_bde64 *bpl = NULL;
9173 struct ulp_bde64 bde;
9174 struct sli4_sge *sgl = NULL;
9175 struct lpfc_dmabuf *dmabuf;
9179 uint32_t offset = 0; /* accumulated offset in the sg request list */
9180 int inbound = 0; /* number of sg reply entries inbound from firmware */
9182 if (!piocbq || !sglq)
9185 sgl = (struct sli4_sge *)sglq->sgl;
9186 icmd = &piocbq->iocb;
9187 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9188 return sglq->sli4_xritag;
9189 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9190 numBdes = icmd->un.genreq64.bdl.bdeSize /
9191 sizeof(struct ulp_bde64);
9192 /* The addrHigh and addrLow fields within the IOCB
9193 * have not been byteswapped yet so there is no
9194 * need to swap them back. */
9196 if (piocbq->context3)
9197 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9201 bpl = (struct ulp_bde64 *)dmabuf->virt;
9205 for (i = 0; i < numBdes; i++) {
9206 /* Should already be byte swapped. */
9207 sgl->addr_hi = bpl->addrHigh;
9208 sgl->addr_lo = bpl->addrLow;
9210 sgl->word2 = le32_to_cpu(sgl->word2);
9211 if ((i+1) == numBdes)
9212 bf_set(lpfc_sli4_sge_last, sgl, 1);
9214 bf_set(lpfc_sli4_sge_last, sgl, 0);
9215 /* swap the size field back to the cpu so we
9216 * can assign it to the sgl. */
9218 bde.tus.w = le32_to_cpu(bpl->tus.w);
9219 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9220 /* The offsets in the sgl need to be accumulated
9221 * separately for the request and reply lists.
9222 * The request is always first, the reply follows. */
9224 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9225 /* add up the reply sg entries */
9226 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9228 /* first inbound? reset the offset */
9231 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9232 bf_set(lpfc_sli4_sge_type, sgl,
9233 LPFC_SGE_TYPE_DATA);
9234 offset += bde.tus.f.bdeSize;
9236 sgl->word2 = cpu_to_le32(sgl->word2);
9240 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9241 /* The addrHigh and addrLow fields of the BDE have not
9242 * been byteswapped yet so they need to be swapped
9243 * before putting them in the sgl. */
9246 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9248 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9249 sgl->word2 = le32_to_cpu(sgl->word2);
9250 bf_set(lpfc_sli4_sge_last, sgl, 1);
9251 sgl->word2 = cpu_to_le32(sgl->word2);
9253 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9255 return sglq->sli4_xritag;
9259 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9260 * @phba: Pointer to HBA context object.
9261 * @piocb: Pointer to command iocb.
9262 * @wqe: Pointer to the work queue entry.
9264 * This routine converts the iocb command to its Work Queue Entry
9265 * equivalent. The wqe pointer should not have any fields set when
9266 * this routine is called because it will memcpy over them.
9267 * This routine does not set the CQ_ID or the WQEC bits in the
9270 * Returns: 0 = Success, IOCB_ERROR = Failure.
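 *
 * Illustrative sketch of the convert-and-post sequence (mirrors
 * __lpfc_sli_issue_iocb_s4() below):
 *
 *	union lpfc_wqe128 wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *		return IOCB_ERROR;
 *	if (lpfc_sli4_wq_put(wq, &wqe))
 *		return IOCB_ERROR;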
9273 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9274 union lpfc_wqe128 *wqe)
9276 uint32_t xmit_len = 0, total_len = 0;
9280 uint8_t command_type = ELS_COMMAND_NON_FIP;
9283 uint16_t abrt_iotag;
9284 struct lpfc_iocbq *abrtiocbq;
9285 struct ulp_bde64 *bpl = NULL;
9286 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9288 struct ulp_bde64 bde;
9289 struct lpfc_nodelist *ndlp;
9293 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9294 /* The fcp commands will set command type */
9295 if (iocbq->iocb_flag & LPFC_IO_FCP)
9296 command_type = FCP_COMMAND;
9297 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9298 command_type = ELS_COMMAND_FIP;
9300 command_type = ELS_COMMAND_NON_FIP;
9302 if (phba->fcp_embed_io)
9303 memset(wqe, 0, sizeof(union lpfc_wqe128));
9304 /* Some of the fields are in the right position already */
9305 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9306 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9307 /* The ct field has moved so reset */
9308 wqe->generic.wqe_com.word7 = 0;
9309 wqe->generic.wqe_com.word10 = 0;
9312 abort_tag = (uint32_t) iocbq->iotag;
9313 xritag = iocbq->sli4_xritag;
9314 /* words0-2 bpl convert bde */
9315 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9316 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9317 sizeof(struct ulp_bde64);
9318 bpl = (struct ulp_bde64 *)
9319 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9323 /* Should already be byte swapped. */
9324 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9325 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9326 /* swap the size field back to the cpu so we
9327 * can assign it to the sgl. */
9329 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9330 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9332 for (i = 0; i < numBdes; i++) {
9333 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9334 total_len += bde.tus.f.bdeSize;
9337 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9339 iocbq->iocb.ulpIoTag = iocbq->iotag;
9340 cmnd = iocbq->iocb.ulpCommand;
9342 switch (iocbq->iocb.ulpCommand) {
9343 case CMD_ELS_REQUEST64_CR:
9344 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9345 ndlp = iocbq->context_un.ndlp;
9347 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9348 if (!iocbq->iocb.ulpLe) {
9349 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9350 "2007 Only Limited Edition cmd Format"
9351 " supported 0x%x\n",
9352 iocbq->iocb.ulpCommand);
9356 wqe->els_req.payload_len = xmit_len;
9357 /* Els_request64 has a TMO */
9358 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9359 iocbq->iocb.ulpTimeout);
9360 /* Need a VF for word 4 set the vf bit*/
9361 bf_set(els_req64_vf, &wqe->els_req, 0);
9362 /* And a VFID for word 12 */
9363 bf_set(els_req64_vfid, &wqe->els_req, 0);
9364 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9365 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9366 iocbq->iocb.ulpContext);
9367 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9368 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9369 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9370 if (command_type == ELS_COMMAND_FIP)
9371 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9372 >> LPFC_FIP_ELS_ID_SHIFT);
9373 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9374 iocbq->context2)->virt);
9375 if_type = bf_get(lpfc_sli_intf_if_type,
9376 &phba->sli4_hba.sli_intf);
9377 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9378 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9379 *pcmd == ELS_CMD_SCR ||
9380 *pcmd == ELS_CMD_FDISC ||
9381 *pcmd == ELS_CMD_LOGO ||
9382 *pcmd == ELS_CMD_PLOGI)) {
9383 bf_set(els_req64_sp, &wqe->els_req, 1);
9384 bf_set(els_req64_sid, &wqe->els_req,
9385 iocbq->vport->fc_myDID);
9386 if ((*pcmd == ELS_CMD_FLOGI) &&
9387 !(phba->fc_topology ==
9388 LPFC_TOPOLOGY_LOOP))
9389 bf_set(els_req64_sid, &wqe->els_req, 0);
9390 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9391 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9392 phba->vpi_ids[iocbq->vport->vpi]);
9393 } else if (pcmd && iocbq->context1) {
9394 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9395 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9396 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9399 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9400 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9401 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9402 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9403 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9404 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9405 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9406 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9407 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9409 case CMD_XMIT_SEQUENCE64_CX:
9410 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9411 iocbq->iocb.un.ulpWord[3]);
9412 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9413 iocbq->iocb.unsli3.rcvsli3.ox_id);
9414 /* The entire sequence is transmitted for this IOCB */
9415 xmit_len = total_len;
9416 cmnd = CMD_XMIT_SEQUENCE64_CR;
9417 if (phba->link_flag & LS_LOOPBACK_MODE)
9418 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9419 case CMD_XMIT_SEQUENCE64_CR:
9420 /* word3 iocb=io_tag32 wqe=reserved */
9421 wqe->xmit_sequence.rsvd3 = 0;
9422 /* word4 relative_offset memcpy */
9423 /* word5 r_ctl/df_ctl memcpy */
9424 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9425 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9426 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9427 LPFC_WQE_IOD_WRITE);
9428 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9429 LPFC_WQE_LENLOC_WORD12);
9430 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9431 wqe->xmit_sequence.xmit_len = xmit_len;
9432 command_type = OTHER_COMMAND;
9434 case CMD_XMIT_BCAST64_CN:
9435 /* word3 iocb=iotag32 wqe=seq_payload_len */
9436 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9437 /* word4 iocb=rsvd wqe=rsvd */
9438 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9439 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9440 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9441 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9442 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9443 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9444 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9445 LPFC_WQE_LENLOC_WORD3);
9446 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9448 case CMD_FCP_IWRITE64_CR:
9449 command_type = FCP_COMMAND_DATA_OUT;
9450 /* word3 iocb=iotag wqe=payload_offset_len */
9451 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9452 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9453 xmit_len + sizeof(struct fcp_rsp));
9454 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9456 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9457 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9458 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9459 iocbq->iocb.ulpFCP2Rcvy);
9460 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9461 /* Always open the exchange */
9462 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9463 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9464 LPFC_WQE_LENLOC_WORD4);
9465 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9466 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9467 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9468 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9469 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9470 if (iocbq->priority) {
9471 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9472 (iocbq->priority << 1));
9474 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9475 (phba->cfg_XLanePriority << 1));
9478 /* Note, word 10 is already initialized to 0 */
9480 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9481 if (phba->cfg_enable_pbde)
9482 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9484 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9486 if (phba->fcp_embed_io) {
9487 struct lpfc_scsi_buf *lpfc_cmd;
9488 struct sli4_sge *sgl;
9489 struct fcp_cmnd *fcp_cmnd;
9492 /* 128 byte wqe support here */
9494 lpfc_cmd = iocbq->context1;
9495 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9496 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9498 /* Word 0-2 - FCP_CMND */
9499 wqe->generic.bde.tus.f.bdeFlags =
9500 BUFF_TYPE_BDE_IMMED;
9501 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9502 wqe->generic.bde.addrHigh = 0;
9503 wqe->generic.bde.addrLow = 88; /* Word 22 */
9505 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9506 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9508 /* Word 22-29 FCP CMND Payload */
9509 ptr = &wqe->words[22];
9510 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9513 case CMD_FCP_IREAD64_CR:
9514 /* word3 iocb=iotag wqe=payload_offset_len */
9515 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9516 bf_set(payload_offset_len, &wqe->fcp_iread,
9517 xmit_len + sizeof(struct fcp_rsp));
9518 bf_set(cmd_buff_len, &wqe->fcp_iread,
9520 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9521 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9522 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9523 iocbq->iocb.ulpFCP2Rcvy);
9524 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9525 /* Always open the exchange */
9526 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9527 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9528 LPFC_WQE_LENLOC_WORD4);
9529 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9530 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9531 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9532 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9533 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9534 if (iocbq->priority) {
9535 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9536 (iocbq->priority << 1));
9538 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9539 (phba->cfg_XLanePriority << 1));
9542 /* Note, word 10 is already initialized to 0 */
9544 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9545 if (phba->cfg_enable_pbde)
9546 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9548 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9550 if (phba->fcp_embed_io) {
9551 struct lpfc_scsi_buf *lpfc_cmd;
9552 struct sli4_sge *sgl;
9553 struct fcp_cmnd *fcp_cmnd;
9556 /* 128 byte wqe support here */
9558 lpfc_cmd = iocbq->context1;
9559 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9560 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9562 /* Word 0-2 - FCP_CMND */
9563 wqe->generic.bde.tus.f.bdeFlags =
9564 BUFF_TYPE_BDE_IMMED;
9565 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9566 wqe->generic.bde.addrHigh = 0;
9567 wqe->generic.bde.addrLow = 88; /* Word 22 */
9569 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9570 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9572 /* Word 22-29 FCP CMND Payload */
9573 ptr = &wqe->words[22];
9574 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9577 case CMD_FCP_ICMND64_CR:
9578 /* word3 iocb=iotag wqe=payload_offset_len */
9579 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9580 bf_set(payload_offset_len, &wqe->fcp_icmd,
9581 xmit_len + sizeof(struct fcp_rsp));
9582 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9584 /* word3 iocb=IO_TAG wqe=reserved */
9585 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9586 /* Always open the exchange */
9587 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9588 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9589 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9590 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9591 LPFC_WQE_LENLOC_NONE);
9592 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9593 iocbq->iocb.ulpFCP2Rcvy);
9594 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9595 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9596 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9597 if (iocbq->priority) {
9598 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9599 (iocbq->priority << 1));
9601 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9602 (phba->cfg_XLanePriority << 1));
9605 /* Note, word 10 is already initialized to 0 */
9607 if (phba->fcp_embed_io) {
9608 struct lpfc_scsi_buf *lpfc_cmd;
9609 struct sli4_sge *sgl;
9610 struct fcp_cmnd *fcp_cmnd;
9613 /* 128 byte wqe support here */
9615 lpfc_cmd = iocbq->context1;
9616 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9617 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9619 /* Word 0-2 - FCP_CMND */
9620 wqe->generic.bde.tus.f.bdeFlags =
9621 BUFF_TYPE_BDE_IMMED;
9622 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9623 wqe->generic.bde.addrHigh = 0;
9624 wqe->generic.bde.addrLow = 88; /* Word 22 */
9626 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9627 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9629 /* Word 22-29 FCP CMND Payload */
9630 ptr = &wqe->words[22];
9631 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9634 case CMD_GEN_REQUEST64_CR:
9635 /* For this command calculate the xmit length of the request bde. */
9639 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9640 sizeof(struct ulp_bde64);
9641 for (i = 0; i < numBdes; i++) {
9642 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9643 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9645 xmit_len += bde.tus.f.bdeSize;
9647 /* word3 iocb=IO_TAG wqe=request_payload_len */
9648 wqe->gen_req.request_payload_len = xmit_len;
9649 /* word4 iocb=parameter wqe=relative_offset memcpy */
9650 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9651 /* word6 context tag copied in memcpy */
9652 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9653 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9654 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9655 "2015 Invalid CT %x command 0x%x\n",
9656 ct, iocbq->iocb.ulpCommand);
9659 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9660 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9661 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9662 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9663 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9664 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9665 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9666 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9667 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9668 command_type = OTHER_COMMAND;
9670 case CMD_XMIT_ELS_RSP64_CX:
9671 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9672 /* words0-2 BDE memcpy */
9673 /* word3 iocb=iotag32 wqe=response_payload_len */
9674 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9676 wqe->xmit_els_rsp.word4 = 0;
9677 /* word5 iocb=rsvd wqe=did */
9678 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9679 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9681 if_type = bf_get(lpfc_sli_intf_if_type,
9682 &phba->sli4_hba.sli_intf);
9683 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9684 if (iocbq->vport->fc_flag & FC_PT2PT) {
9685 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9686 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9687 iocbq->vport->fc_myDID);
9688 if (iocbq->vport->fc_myDID == Fabric_DID) {
9690 &wqe->xmit_els_rsp.wqe_dest, 0);
9694 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9695 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9696 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9697 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9698 iocbq->iocb.unsli3.rcvsli3.ox_id);
9699 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9700 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9701 phba->vpi_ids[iocbq->vport->vpi]);
9702 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9703 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9704 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9705 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9706 LPFC_WQE_LENLOC_WORD3);
9707 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9708 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9709 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9710 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9711 iocbq->context2)->virt);
9712 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9713 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9714 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9715 iocbq->vport->fc_myDID);
9716 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9717 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9718 phba->vpi_ids[phba->pport->vpi]);
9720 command_type = OTHER_COMMAND;
9722 case CMD_CLOSE_XRI_CN:
9723 case CMD_ABORT_XRI_CN:
9724 case CMD_ABORT_XRI_CX:
9725 /* words 0-2 memcpy should be 0 (reserved) */
9726 /* port will send abts */
9727 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9728 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9729 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9730 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9734 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9736 /* The link is down, or the command was ELS_FIP,
9737 * so the fw does not need to send the abts. */
9740 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9742 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9743 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9744 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9745 wqe->abort_cmd.rsrvd5 = 0;
9746 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9747 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9748 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9750 /* The abort handler will send us CMD_ABORT_XRI_CN or
9751 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX. */
9753 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9754 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9755 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9756 LPFC_WQE_LENLOC_NONE);
9757 cmnd = CMD_ABORT_XRI_CX;
9758 command_type = OTHER_COMMAND;
9761 case CMD_XMIT_BLS_RSP64_CX:
9762 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9763 /* As BLS ABTS RSP WQE is very different from other WQEs,
9764 * we re-construct this WQE here based on information in
9765 * iocbq from scratch. */
9767 memset(wqe, 0, sizeof(union lpfc_wqe));
9768 /* OX_ID is invariable to who sent ABTS to CT exchange */
9769 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9770 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9771 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9772 LPFC_ABTS_UNSOL_INT) {
9773 /* ABTS sent by initiator to CT exchange, the
9774 * RX_ID field will be filled with the newly
9775 * allocated responder XRI. */
9777 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9778 iocbq->sli4_xritag);
9780 /* ABTS sent by responder to CT exchange, the
9781 * RX_ID field will be filled with the responder RX_ID. */
9784 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9785 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9787 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9788 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9791 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9793 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9794 iocbq->iocb.ulpContext);
9795 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9796 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9797 phba->vpi_ids[phba->pport->vpi]);
9798 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9799 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9800 LPFC_WQE_LENLOC_NONE);
9801 /* Overwrite the pre-set command type with OTHER_COMMAND */
9802 command_type = OTHER_COMMAND;
9803 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9804 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9805 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9806 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9807 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9808 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9809 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9813 case CMD_SEND_FRAME:
9814 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9815 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9817 case CMD_XRI_ABORTED_CX:
9818 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9819 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9820 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9821 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9822 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9825 "2014 Invalid command 0x%x\n",
9826 iocbq->iocb.ulpCommand);
9831 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9832 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9833 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9834 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9835 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9836 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9837 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9838 LPFC_IO_DIF_INSERT);
9839 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9840 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9841 wqe->generic.wqe_com.abort_tag = abort_tag;
9842 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9843 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9844 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9845 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9850 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9851 * @phba: Pointer to HBA context object.
9852 * @ring_number: SLI ring number to issue iocb on.
9853 * @piocb: Pointer to command iocb.
9854 * @flag: Flag indicating if this command can be put into txq.
9856 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9857 * an iocb command to an HBA with SLI-4 interface spec.
9859 * This function is called with hbalock held. The function will return success
9860 * after it successfully submits the iocb to firmware or after adding it to the txq.
9864 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9865 struct lpfc_iocbq *piocb, uint32_t flag)
9867 struct lpfc_sglq *sglq;
9868 union lpfc_wqe128 wqe;
9869 struct lpfc_queue *wq;
9870 struct lpfc_sli_ring *pring;
9873 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9874 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9875 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9876 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9878 wq = phba->sli4_hba.oas_wq;
9880 wq = phba->sli4_hba.els_wq;
9883 /* Get corresponding ring */
9887 * The WQE can be either 64 or 128 bytes,
9890 lockdep_assert_held(&phba->hbalock);
9892 if (piocb->sli4_xritag == NO_XRI) {
9893 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9894 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9897 if (!list_empty(&pring->txq)) {
9898 if (!(flag & SLI_IOCB_RET_IOCB)) {
9899 __lpfc_sli_ringtx_put(phba,
9901 return IOCB_SUCCESS;
9906 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9908 if (!(flag & SLI_IOCB_RET_IOCB)) {
9909 __lpfc_sli_ringtx_put(phba,
9912 return IOCB_SUCCESS;
9918 } else if (piocb->iocb_flag & LPFC_IO_FCP)
9919 /* These IO's already have an XRI and a mapped sgl. */
9923 * This is a continuation of a command (CX), so this
9924 * sglq is on the active list
9926 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9932 piocb->sli4_lxritag = sglq->sli4_lxritag;
9933 piocb->sli4_xritag = sglq->sli4_xritag;
9934 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9938 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9941 if (lpfc_sli4_wq_put(wq, &wqe))
9943 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9949 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9951 * This routine wraps the actual lockless version of the issue IOCB
9952 * function, invoked through the function pointer in the lpfc_hba struct.
9955 * IOCB_ERROR - Error
9956 * IOCB_SUCCESS - Success
9960 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9961 struct lpfc_iocbq *piocb, uint32_t flag)
9963 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9967 * lpfc_sli_api_table_setup - Set up sli api function jump table
9968 * @phba: The hba struct for which this call is being executed.
9969 * @dev_grp: The HBA PCI-Device group number.
9971 * This routine sets up the SLI interface API function jump table in @phba
9973 * Returns: 0 - success, -ENODEV - failure.
9976 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9980 case LPFC_PCI_DEV_LP:
9981 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9982 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9984 case LPFC_PCI_DEV_OC:
9985 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9986 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9990 "1419 Invalid HBA PCI-device group: 0x%x\n",
9995 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
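/*
 * Usage sketch (illustrative): once the jump table is set up, callers
 * dispatch through the per-device-group pointers and transparently get
 * the _s3 or _s4 variant:
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 */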
10000 * lpfc_sli4_calc_ring - Calculates which ring to use
10001 * @phba: Pointer to HBA context object.
10002 * @piocb: Pointer to command iocb.
10004 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10005 * hba_wqidx, thus we need to calculate the corresponding ring.
10006 * Since ABORTS must go on the same WQ as the command they are
10007 * aborting, we use the command's hba_wqidx.
10009 struct lpfc_sli_ring *
10010 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10012 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10013 if (!(phba->cfg_fof) ||
10014 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
10015 if (unlikely(!phba->sli4_hba.fcp_wq))
10018 * for abort iocb hba_wqidx should already
10019 * be setup based on what work queue we used.
10021 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10023 lpfc_sli4_scmd_to_wqidx_distr(phba,
10025 piocb->hba_wqidx = piocb->hba_wqidx %
10026 phba->cfg_fcp_io_channel;
10028 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
10030 if (unlikely(!phba->sli4_hba.oas_wq))
10032 piocb->hba_wqidx = 0;
10033 return phba->sli4_hba.oas_wq->pring;
10036 if (unlikely(!phba->sli4_hba.els_wq))
10038 piocb->hba_wqidx = 0;
10039 return phba->sli4_hba.els_wq->pring;
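/*
 * Worked example of the WQ index math above (values assumed): with
 * cfg_fcp_io_channel = 4, an FCP command hashed by
 * lpfc_sli4_scmd_to_wqidx_distr() to 9 is issued on
 * fcp_wq[9 % 4] = fcp_wq[1], and any later ABTS for it reuses
 * hba_wqidx = 1 so the abort lands on the same WQ and ring.
 */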
10044 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10045 * @phba: Pointer to HBA context object.
10046 * @pring: Pointer to driver SLI ring object.
10047 * @piocb: Pointer to command iocb.
10048 * @flag: Flag indicating if this command can be put into txq.
10050 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10051 * function. This function gets the hbalock and calls
10052 * __lpfc_sli_issue_iocb function and will return the error returned
10053 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10054 * functions which do not hold hbalock.
10057 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10058 struct lpfc_iocbq *piocb, uint32_t flag)
10060 struct lpfc_hba_eq_hdl *hba_eq_hdl;
10061 struct lpfc_sli_ring *pring;
10062 struct lpfc_queue *fpeq;
10063 struct lpfc_eqe *eqe;
10064 unsigned long iflags;
10067 if (phba->sli_rev == LPFC_SLI_REV4) {
10068 pring = lpfc_sli4_calc_ring(phba, piocb);
10069 if (unlikely(pring == NULL))
10072 spin_lock_irqsave(&pring->ring_lock, iflags);
10073 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10074 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10076 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
10077 idx = piocb->hba_wqidx;
10078 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
10080 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
10082 /* Get associated EQ with this index */
10083 fpeq = phba->sli4_hba.hba_eq[idx];
10085 /* Turn off interrupts from this EQ */
10086 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
10089 * Process all the events on FCP EQ
10091 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
10092 lpfc_sli4_hba_handle_eqe(phba,
10094 fpeq->EQ_processed++;
10097 /* Always clear and re-arm the EQ */
10098 phba->sli4_hba.sli4_eq_release(fpeq,
10101 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
10104 /* For now, SLI2/3 will still use hbalock */
10105 spin_lock_irqsave(&phba->hbalock, iflags);
10106 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10107 spin_unlock_irqrestore(&phba->hbalock, iflags);
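/*
 * Caller sketch (assumed, not from this file): paths that do not hold
 * hbalock issue through this wrapper, e.g. sending an ELS command and
 * letting it queue to the txq when the WQ is busy:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */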
10113 * lpfc_extra_ring_setup - Extra ring setup function
10114 * @phba: Pointer to HBA context object.
10116 * This function is called while the driver attaches to the
10117 * HBA to set up the extra ring. The extra ring is used
10118 * only when the driver needs to support target mode
10119 * or IP over FC functionality.
10121 * This function is called with no lock held. SLI3 only.
10124 lpfc_extra_ring_setup(struct lpfc_hba *phba)
10126 struct lpfc_sli *psli;
10127 struct lpfc_sli_ring *pring;
10131 /* Adjust cmd/rsp ring iocb entries more evenly */
10133 /* Take some away from the FCP ring */
10134 pring = &psli->sli3_ring[LPFC_FCP_RING];
10135 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10136 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10137 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10138 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10140 /* and give them to the extra ring */
10141 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10143 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10144 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10145 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10146 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10148 /* Setup default profile for this ring */
10149 pring->iotag_max = 4096;
10150 pring->num_mask = 1;
10151 pring->prt[0].profile = 0; /* Mask 0 */
10152 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10153 pring->prt[0].type = phba->cfg_multi_ring_type;
10154 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10158 /**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10159 * @phba: Pointer to HBA context object.
10160 * @iocbq: Pointer to iocb object.
10162 * The async_event handler calls this routine when it receives
10163 * an ASYNC_STATUS_CN event from the port. The port generates
10164 * this event when an Abort Sequence request to an rport fails
10165 * twice in succession. The abort could have originated from the
10166 * driver or from the port. The ABTS could have been for an ELS
10167 * or FCP IO. The port only generates this event when an ABTS
10168 * fails to complete after one retry.
10171 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10172 struct lpfc_iocbq *iocbq)
10174 struct lpfc_nodelist *ndlp = NULL;
10175 uint16_t rpi = 0, vpi = 0;
10176 struct lpfc_vport *vport = NULL;
10178 /* The rpi in the ulpContext is vport-sensitive. */
10179 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10180 rpi = iocbq->iocb.ulpContext;
10182 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10183 "3092 Port generated ABTS async event "
10184 "on vpi %d rpi %d status 0x%x\n",
10185 vpi, rpi, iocbq->iocb.ulpStatus);
10187 vport = lpfc_find_vport_by_vpid(phba, vpi);
10190 ndlp = lpfc_findnode_rpi(vport, rpi);
10191 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10194 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10195 lpfc_sli_abts_recover_port(vport, ndlp);
10199 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10200 "3095 Event Context not found, no "
10201 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10202 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10206 /**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10207 * @phba: pointer to HBA context object.
10208 * @ndlp: nodelist pointer for the impacted rport.
10209 * @axri: pointer to the wcqe containing the failed exchange.
10211 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10212 * port. The port generates this event when an abort exchange request to an
10213 * rport fails twice in succession with no reply. The abort could be originated
10214 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10217 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10218 struct lpfc_nodelist *ndlp,
10219 struct sli4_wcqe_xri_aborted *axri)
10221 struct lpfc_vport *vport;
10222 uint32_t ext_status = 0;
10224 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10225 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10226 "3115 Node Context not found, driver "
10227 "ignoring abts err event\n");
10231 vport = ndlp->vport;
10232 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10233 "3116 Port generated FCP XRI ABORT event on "
10234 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10235 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10236 bf_get(lpfc_wcqe_xa_xri, axri),
10237 bf_get(lpfc_wcqe_xa_status, axri),
10241 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10242 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10243 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10245 ext_status = axri->parameter & IOERR_PARAM_MASK;
10246 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10247 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10248 lpfc_sli_abts_recover_port(vport, ndlp);
10252 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10253 * @phba: Pointer to HBA context object.
10254 * @pring: Pointer to driver SLI ring object.
10255 * @iocbq: Pointer to iocb object.
10257 * This function is called by the slow ring event handler
10258 * function when there is an ASYNC event iocb in the ring.
10259 * This function is called with no lock held.
10260 * Currently this function handles only temperature related
10261 * ASYNC events. The function decodes the temperature sensor
10262 * event message and posts events for the management applications.
10265 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10266 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10270 struct temp_event temp_event_data;
10271 struct Scsi_Host *shost;
10274 icmd = &iocbq->iocb;
10275 evt_code = icmd->un.asyncstat.evt_code;
10277 switch (evt_code) {
10278 case ASYNC_TEMP_WARN:
10279 case ASYNC_TEMP_SAFE:
10280 temp_event_data.data = (uint32_t) icmd->ulpContext;
10281 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10282 if (evt_code == ASYNC_TEMP_WARN) {
10283 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10284 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10285 "0347 Adapter is very hot, please take "
10286 "corrective action. temperature : %d Celsius\n",
10287 (uint32_t) icmd->ulpContext);
10289 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10290 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10291 "0340 Adapter temperature is OK now. "
10292 "temperature : %d Celsius\n",
10293 (uint32_t) icmd->ulpContext);
10296 /* Send temperature change event to applications */
10297 shost = lpfc_shost_from_vport(phba->pport);
10298 fc_host_post_vendor_event(shost, fc_get_event_number(),
10299 sizeof(temp_event_data), (char *) &temp_event_data,
10300 LPFC_NL_VENDOR_ID);
10302 case ASYNC_STATUS_CN:
10303 lpfc_sli_abts_err_handler(phba, iocbq);
10306 iocb_w = (uint32_t *) icmd;
10307 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10308 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10310 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10311 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10312 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10313 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10314 pring->ringno, icmd->un.asyncstat.evt_code,
10315 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10316 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10317 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10318 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10326 * lpfc_sli4_setup - SLI ring setup function
10327 * @phba: Pointer to HBA context object.
10329 * lpfc_sli4_setup sets up rings of the SLI interface with
10330 * the number of iocbs per ring and iotags. This function is
10331 * called while the driver attaches to the HBA and before the
10332 * interrupts are enabled, so there is no need for locking.
10334 * This function always returns 0.
10337 lpfc_sli4_setup(struct lpfc_hba *phba)
10339 struct lpfc_sli_ring *pring;
10341 pring = phba->sli4_hba.els_wq->pring;
10342 pring->num_mask = LPFC_MAX_RING_MASK;
10343 pring->prt[0].profile = 0; /* Mask 0 */
10344 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10345 pring->prt[0].type = FC_TYPE_ELS;
10346 pring->prt[0].lpfc_sli_rcv_unsol_event =
10347 lpfc_els_unsol_event;
10348 pring->prt[1].profile = 0; /* Mask 1 */
10349 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10350 pring->prt[1].type = FC_TYPE_ELS;
10351 pring->prt[1].lpfc_sli_rcv_unsol_event =
10352 lpfc_els_unsol_event;
10353 pring->prt[2].profile = 0; /* Mask 2 */
10354 /* NameServer Inquiry */
10355 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10357 pring->prt[2].type = FC_TYPE_CT;
10358 pring->prt[2].lpfc_sli_rcv_unsol_event =
10359 lpfc_ct_unsol_event;
10360 pring->prt[3].profile = 0; /* Mask 3 */
10361 /* NameServer response */
10362 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10364 pring->prt[3].type = FC_TYPE_CT;
10365 pring->prt[3].lpfc_sli_rcv_unsol_event =
10366 lpfc_ct_unsol_event;
10371 * lpfc_sli_setup - SLI ring setup function
10372 * @phba: Pointer to HBA context object.
10374 * lpfc_sli_setup sets up rings of the SLI interface with
10375 * the number of iocbs per ring and iotags. This function is
10376 * called while the driver attaches to the HBA and before the
10377 * interrupts are enabled, so there is no need for locking.
10379 * This function always returns 0. SLI3 only.
10382 lpfc_sli_setup(struct lpfc_hba *phba)
10384 int i, totiocbsize = 0;
10385 struct lpfc_sli *psli = &phba->sli;
10386 struct lpfc_sli_ring *pring;
10388 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10389 psli->sli_flag = 0;
10391 psli->iocbq_lookup = NULL;
10392 psli->iocbq_lookup_len = 0;
10393 psli->last_iotag = 0;
10395 for (i = 0; i < psli->num_rings; i++) {
10396 pring = &psli->sli3_ring[i];
10398 case LPFC_FCP_RING: /* ring 0 - FCP */
10399 /* numCiocb and numRiocb are used in config_port */
10400 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10401 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10402 pring->sli.sli3.numCiocb +=
10403 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10404 pring->sli.sli3.numRiocb +=
10405 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10406 pring->sli.sli3.numCiocb +=
10407 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10408 pring->sli.sli3.numRiocb +=
10409 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10410 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10411 SLI3_IOCB_CMD_SIZE :
10412 SLI2_IOCB_CMD_SIZE;
10413 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10414 SLI3_IOCB_RSP_SIZE :
10415 SLI2_IOCB_RSP_SIZE;
10416 pring->iotag_ctr = 0;
10418 (phba->cfg_hba_queue_depth * 2);
10419 pring->fast_iotag = pring->iotag_max;
10420 pring->num_mask = 0;
10422 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10423 /* numCiocb and numRiocb are used in config_port */
10424 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10425 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10426 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10427 SLI3_IOCB_CMD_SIZE :
10428 SLI2_IOCB_CMD_SIZE;
10429 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10430 SLI3_IOCB_RSP_SIZE :
10431 SLI2_IOCB_RSP_SIZE;
10432 pring->iotag_max = phba->cfg_hba_queue_depth;
10433 pring->num_mask = 0;
10435 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10436 /* numCiocb and numRiocb are used in config_port */
10437 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10438 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10439 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10440 SLI3_IOCB_CMD_SIZE :
10441 SLI2_IOCB_CMD_SIZE;
10442 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10443 SLI3_IOCB_RSP_SIZE :
10444 SLI2_IOCB_RSP_SIZE;
10445 pring->fast_iotag = 0;
10446 pring->iotag_ctr = 0;
10447 pring->iotag_max = 4096;
10448 pring->lpfc_sli_rcv_async_status =
10449 lpfc_sli_async_event_handler;
10450 pring->num_mask = LPFC_MAX_RING_MASK;
10451 pring->prt[0].profile = 0; /* Mask 0 */
10452 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10453 pring->prt[0].type = FC_TYPE_ELS;
10454 pring->prt[0].lpfc_sli_rcv_unsol_event =
10455 lpfc_els_unsol_event;
10456 pring->prt[1].profile = 0; /* Mask 1 */
10457 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10458 pring->prt[1].type = FC_TYPE_ELS;
10459 pring->prt[1].lpfc_sli_rcv_unsol_event =
10460 lpfc_els_unsol_event;
10461 pring->prt[2].profile = 0; /* Mask 2 */
10462 /* NameServer Inquiry */
10463 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10465 pring->prt[2].type = FC_TYPE_CT;
10466 pring->prt[2].lpfc_sli_rcv_unsol_event =
10467 lpfc_ct_unsol_event;
10468 pring->prt[3].profile = 0; /* Mask 3 */
10469 /* NameServer response */
10470 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10472 pring->prt[3].type = FC_TYPE_CT;
10473 pring->prt[3].lpfc_sli_rcv_unsol_event =
10474 lpfc_ct_unsol_event;
10477 totiocbsize += (pring->sli.sli3.numCiocb *
10478 pring->sli.sli3.sizeCiocb) +
10479 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10481 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10482 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10483 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10484 "SLI2 SLIM Data: x%x x%lx\n",
10485 phba->brd_no, totiocbsize,
10486 (unsigned long) MAX_SLIM_IOCB_SIZE);
10488 if (phba->cfg_multi_ring_support == 2)
10489 lpfc_extra_ring_setup(phba);
10495 * lpfc_sli4_queue_init - Queue initialization function
10496 * @phba: Pointer to HBA context object.
10498 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10499 * ring. This function also initializes ring indices of each ring.
10500 * This function is called during the initialization of the SLI
10501 * interface of an HBA.
10502 * This function is called with no lock held and always returns
10506 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10508 struct lpfc_sli *psli;
10509 struct lpfc_sli_ring *pring;
10513 spin_lock_irq(&phba->hbalock);
10514 INIT_LIST_HEAD(&psli->mboxq);
10515 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10516 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10517 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
10518 pring = phba->sli4_hba.fcp_wq[i]->pring;
10520 pring->ringno = LPFC_FCP_RING;
10521 INIT_LIST_HEAD(&pring->txq);
10522 INIT_LIST_HEAD(&pring->txcmplq);
10523 INIT_LIST_HEAD(&pring->iocb_continueq);
10524 spin_lock_init(&pring->ring_lock);
10526 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
10527 pring = phba->sli4_hba.nvme_wq[i]->pring;
10529 pring->ringno = LPFC_FCP_RING;
10530 INIT_LIST_HEAD(&pring->txq);
10531 INIT_LIST_HEAD(&pring->txcmplq);
10532 INIT_LIST_HEAD(&pring->iocb_continueq);
10533 spin_lock_init(&pring->ring_lock);
10535 pring = phba->sli4_hba.els_wq->pring;
10537 pring->ringno = LPFC_ELS_RING;
10538 INIT_LIST_HEAD(&pring->txq);
10539 INIT_LIST_HEAD(&pring->txcmplq);
10540 INIT_LIST_HEAD(&pring->iocb_continueq);
10541 spin_lock_init(&pring->ring_lock);
10543 if (phba->cfg_nvme_io_channel) {
10544 pring = phba->sli4_hba.nvmels_wq->pring;
10546 pring->ringno = LPFC_ELS_RING;
10547 INIT_LIST_HEAD(&pring->txq);
10548 INIT_LIST_HEAD(&pring->txcmplq);
10549 INIT_LIST_HEAD(&pring->iocb_continueq);
10550 spin_lock_init(&pring->ring_lock);
10553 if (phba->cfg_fof) {
10554 pring = phba->sli4_hba.oas_wq->pring;
10556 pring->ringno = LPFC_FCP_RING;
10557 INIT_LIST_HEAD(&pring->txq);
10558 INIT_LIST_HEAD(&pring->txcmplq);
10559 INIT_LIST_HEAD(&pring->iocb_continueq);
10560 spin_lock_init(&pring->ring_lock);
10563 spin_unlock_irq(&phba->hbalock);
10567 * lpfc_sli_queue_init - Queue initialization function
10568 * @phba: Pointer to HBA context object.
10570 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10571 * ring. This function also initializes ring indices of each ring.
10572 * This function is called during the initialization of the SLI
10573 * interface of an HBA.
10574 * This function is called with no lock held and always returns
10578 lpfc_sli_queue_init(struct lpfc_hba *phba)
10580 struct lpfc_sli *psli;
10581 struct lpfc_sli_ring *pring;
10585 spin_lock_irq(&phba->hbalock);
10586 INIT_LIST_HEAD(&psli->mboxq);
10587 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10588 /* Initialize list headers for txq and txcmplq as doubly linked lists */
10589 for (i = 0; i < psli->num_rings; i++) {
10590 pring = &psli->sli3_ring[i];
10592 pring->sli.sli3.next_cmdidx = 0;
10593 pring->sli.sli3.local_getidx = 0;
10594 pring->sli.sli3.cmdidx = 0;
10595 INIT_LIST_HEAD(&pring->iocb_continueq);
10596 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10597 INIT_LIST_HEAD(&pring->postbufq);
10599 INIT_LIST_HEAD(&pring->txq);
10600 INIT_LIST_HEAD(&pring->txcmplq);
10601 spin_lock_init(&pring->ring_lock);
10603 spin_unlock_irq(&phba->hbalock);
10607 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10608 * @phba: Pointer to HBA context object.
10610 * This routine flushes the mailbox command subsystem. It will unconditionally
10611 * flush all the mailbox commands in the three possible stages in the mailbox
10612 * command sub-system: pending mailbox command queue; the outstanding mailbox
10613 * command; and completed mailbox command queue. It is the caller's responsibility
10614 * to make sure that the driver is in the proper state to flush the mailbox
10615 * command sub-system. Namely, the posting of mailbox commands into the
10616 * pending mailbox command queue from the various clients must be stopped;
10617 * either the HBA is in a state in which it will never work on the outstanding
10618 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10619 * mailbox command has been completed.
10622 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10624 LIST_HEAD(completions);
10625 struct lpfc_sli *psli = &phba->sli;
10627 unsigned long iflag;
10629 /* Disable softirqs, including timers from obtaining phba->hbalock */
10630 local_bh_disable();
10632 /* Flush all the mailbox commands in the mbox system */
10633 spin_lock_irqsave(&phba->hbalock, iflag);
10635 /* The pending mailbox command queue */
10636 list_splice_init(&phba->sli.mboxq, &completions);
10637 /* The outstanding active mailbox command */
10638 if (psli->mbox_active) {
10639 list_add_tail(&psli->mbox_active->list, &completions);
10640 psli->mbox_active = NULL;
10641 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10643 /* The completed mailbox command queue */
10644 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10645 spin_unlock_irqrestore(&phba->hbalock, iflag);
10647 /* Enable softirqs again, done with phba->hbalock */
10650 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10651 while (!list_empty(&completions)) {
10652 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10653 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10654 if (pmb->mbox_cmpl)
10655 pmb->mbox_cmpl(phba, pmb);
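/*
 * Sketch of a client completion handler after a flush (hypothetical
 * handler name; the MBX_NOT_FINISHED status is what the loop above
 * stamps into every flushed command):
 *
 *	static void example_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
 *			// flushed, never executed by the port
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *			return;
 *		}
 *		// normal completion processing
 *	}
 */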
10660 * lpfc_sli_host_down - Vport cleanup function
10661 * @vport: Pointer to virtual port object.
10663 * lpfc_sli_host_down is called to clean up the resources
10664 * associated with a vport before destroying virtual
10665 * port data structures.
10666 * This function does following operations:
10667 * - Free discovery resources associated with this virtual
10669 * - Free iocbs associated with this virtual port in
10671 * - Send abort for all iocb commands associated with this
10672 * vport in txcmplq.
10674 * This function is called with no lock held and always returns 1.
10677 lpfc_sli_host_down(struct lpfc_vport *vport)
10679 LIST_HEAD(completions);
10680 struct lpfc_hba *phba = vport->phba;
10681 struct lpfc_sli *psli = &phba->sli;
10682 struct lpfc_queue *qp = NULL;
10683 struct lpfc_sli_ring *pring;
10684 struct lpfc_iocbq *iocb, *next_iocb;
10686 unsigned long flags = 0;
10687 uint16_t prev_pring_flag;
10689 lpfc_cleanup_discovery_resources(vport);
10691 spin_lock_irqsave(&phba->hbalock, flags);
10694 * Error everything on the txq since these iocbs
10695 * have not been given to the FW yet.
10696 * Also issue ABTS for everything on the txcmplq
10698 if (phba->sli_rev != LPFC_SLI_REV4) {
10699 for (i = 0; i < psli->num_rings; i++) {
10700 pring = &psli->sli3_ring[i];
10701 prev_pring_flag = pring->flag;
10702 /* Only slow rings */
10703 if (pring->ringno == LPFC_ELS_RING) {
10704 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10705 /* Set the lpfc data pending flag */
10706 set_bit(LPFC_DATA_READY, &phba->data_flags);
10708 list_for_each_entry_safe(iocb, next_iocb,
10709 &pring->txq, list) {
10710 if (iocb->vport != vport)
10712 list_move_tail(&iocb->list, &completions);
10714 list_for_each_entry_safe(iocb, next_iocb,
10715 &pring->txcmplq, list) {
10716 if (iocb->vport != vport)
10718 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10720 pring->flag = prev_pring_flag;
10723 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10727 if (pring == phba->sli4_hba.els_wq->pring) {
10728 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10729 /* Set the lpfc data pending flag */
10730 set_bit(LPFC_DATA_READY, &phba->data_flags);
10732 prev_pring_flag = pring->flag;
10733 spin_lock_irq(&pring->ring_lock);
10734 list_for_each_entry_safe(iocb, next_iocb,
10735 &pring->txq, list) {
10736 if (iocb->vport != vport)
10738 list_move_tail(&iocb->list, &completions);
10740 spin_unlock_irq(&pring->ring_lock);
10741 list_for_each_entry_safe(iocb, next_iocb,
10742 &pring->txcmplq, list) {
10743 if (iocb->vport != vport)
10745 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10747 pring->flag = prev_pring_flag;
10750 spin_unlock_irqrestore(&phba->hbalock, flags);
10752 /* Cancel all the IOCBs from the completions list */
10753 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10759 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10760 * @phba: Pointer to HBA context object.
10762 * This function cleans up all iocbs, buffers, and mailbox commands
10763 * while shutting down the HBA. This function is called with no
10764 * lock held and always returns 1.
10765 * This function does the following to cleanup driver resources:
10766 * - Free discovery resources for each virtual port
10767 * - Cleanup any pending fabric iocbs
10768 * - Iterate through the iocb txq and free each entry
10770 * - Free up any buffer posted to the HBA
10771 * - Free mailbox commands in the mailbox queue.
10774 lpfc_sli_hba_down(struct lpfc_hba *phba)
10776 LIST_HEAD(completions);
10777 struct lpfc_sli *psli = &phba->sli;
10778 struct lpfc_queue *qp = NULL;
10779 struct lpfc_sli_ring *pring;
10780 struct lpfc_dmabuf *buf_ptr;
10781 unsigned long flags = 0;
10784 /* Shutdown the mailbox command sub-system */
10785 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10787 lpfc_hba_down_prep(phba);
10789 /* Disable softirqs, including timers from obtaining phba->hbalock */
10790 local_bh_disable();
10792 lpfc_fabric_abort_hba(phba);
10794 spin_lock_irqsave(&phba->hbalock, flags);
10797 * Error everything on the txq since these iocbs
10798 * have not been given to the FW yet.
10800 if (phba->sli_rev != LPFC_SLI_REV4) {
10801 for (i = 0; i < psli->num_rings; i++) {
10802 pring = &psli->sli3_ring[i];
10803 /* Only slow rings */
10804 if (pring->ringno == LPFC_ELS_RING) {
10805 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10806 /* Set the lpfc data pending flag */
10807 set_bit(LPFC_DATA_READY, &phba->data_flags);
10809 list_splice_init(&pring->txq, &completions);
10812 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10816 spin_lock_irq(&pring->ring_lock);
10817 list_splice_init(&pring->txq, &completions);
10818 spin_unlock_irq(&pring->ring_lock);
10819 if (pring == phba->sli4_hba.els_wq->pring) {
10820 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10821 /* Set the lpfc data pending flag */
10822 set_bit(LPFC_DATA_READY, &phba->data_flags);
10826 spin_unlock_irqrestore(&phba->hbalock, flags);
10828 /* Cancel all the IOCBs from the completions list */
10829 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10832 spin_lock_irqsave(&phba->hbalock, flags);
10833 list_splice_init(&phba->elsbuf, &completions);
10834 phba->elsbuf_cnt = 0;
10835 phba->elsbuf_prev_cnt = 0;
10836 spin_unlock_irqrestore(&phba->hbalock, flags);
10838 while (!list_empty(&completions)) {
10839 list_remove_head(&completions, buf_ptr,
10840 struct lpfc_dmabuf, list);
10841 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10845 /* Enable softirqs again, done with phba->hbalock */
10848 /* Return any active mbox cmds */
10849 del_timer_sync(&psli->mbox_tmo);
10851 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10852 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10853 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10859 * lpfc_sli_pcimem_bcopy - SLI memory copy function
10860 * @srcp: Source memory pointer.
10861 * @destp: Destination memory pointer.
10862 * @cnt: Number of bytes required to be copied.
10864 * This function is used for copying data between driver memory
10865 * and the SLI memory. This function also changes the endianness
10866 * of each word if native endianness is different from SLI
10867 * endianness. This function can be called with or without
10871 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10873 uint32_t *src = srcp;
10874 uint32_t *dest = destp;
10878 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10880 ldata = le32_to_cpu(ldata);
10889 * lpfc_sli_bemem_bcopy - SLI memory copy function
10890 * @srcp: Source memory pointer.
10891 * @destp: Destination memory pointer.
10892 * @cnt: Number of words required to be copied.
10894 * This function is used for copying data from a data structure
10895 * with big endian representation to the local endianness.
10896 * This function can be called with or without lock.
10899 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10901 uint32_t *src = srcp;
10902 uint32_t *dest = destp;
10906 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10908 ldata = be32_to_cpu(ldata);
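/*
 * Usage sketch for the two copy helpers (call sites illustrative): both
 * walk the buffer one 32-bit word at a time and differ only in the wire
 * format they translate, little endian for SLI/PCI memory versus big
 * endian structures:
 *
 *	lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
 *	lpfc_sli_bemem_bcopy(&rdev_resp, rdp_context, sizeof(rdev_resp));
 */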
10916 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10917 * @phba: Pointer to HBA context object.
10918 * @pring: Pointer to driver SLI ring object.
10919 * @mp: Pointer to driver buffer object.
10921 * This function is called with no lock held.
10922 * It always returns zero after adding the buffer to the postbufq
10926 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10927 struct lpfc_dmabuf *mp)
10929 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10931 spin_lock_irq(&phba->hbalock);
10932 list_add_tail(&mp->list, &pring->postbufq);
10933 pring->postbufq_cnt++;
10934 spin_unlock_irq(&phba->hbalock);
10939 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10940 * @phba: Pointer to HBA context object.
10942 * When HBQ is enabled, buffers are searched based on tags. This function
10943 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10944 * tag is bitwise OR'ed with QUE_BUFTAG_BIT to make sure that the tag
10945 * does not conflict with tags of buffer posted for unsolicited events.
10946 * The function returns the allocated tag. The function is called with
10950 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10952 spin_lock_irq(&phba->hbalock);
10953 phba->buffer_tag_count++;
10955 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10956 * from a tag assigned by HBQ.
10958 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10959 spin_unlock_irq(&phba->hbalock);
10960 return phba->buffer_tag_count;
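/*
 * Illustrative pairing (assumed caller): a buffer posted via
 * CMD_QUE_XRI64_CX carries its tag so the completion path can find it:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 */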
10964 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10965 * @phba: Pointer to HBA context object.
10966 * @pring: Pointer to driver SLI ring object.
10967 * @tag: Buffer tag.
10969 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10970 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10971 * iocb is posted to the response ring with the tag of the buffer.
10972 * This function searches the pring->postbufq list using the tag
10973 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
10974 * iocb. If the buffer is found then the lpfc_dmabuf object of the
10975 * buffer is returned to the caller; otherwise NULL is returned.
10976 * This function is called with no lock held.
10978 struct lpfc_dmabuf *
10979 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10982 struct lpfc_dmabuf *mp, *next_mp;
10983 struct list_head *slp = &pring->postbufq;
10985 /* Search postbufq, from the beginning, looking for a match on tag */
10986 spin_lock_irq(&phba->hbalock);
10987 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10988 if (mp->buffer_tag == tag) {
10989 list_del_init(&mp->list);
10990 pring->postbufq_cnt--;
10991 spin_unlock_irq(&phba->hbalock);
10996 spin_unlock_irq(&phba->hbalock);
10997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10998 "0402 Cannot find virtual addr for buffer tag on "
10999 "ring %d Data x%lx x%p x%p x%x\n",
11000 pring->ringno, (unsigned long) tag,
11001 slp->next, slp->prev, pring->postbufq_cnt);
11007 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11008 * @phba: Pointer to HBA context object.
11009 * @pring: Pointer to driver SLI ring object.
11010 * @phys: DMA address of the buffer.
11012 * This function searches the buffer list using the dma_address
11013 * of the unsolicited event to find the driver's lpfc_dmabuf object
11014 * corresponding to the dma_address. The function returns the
11015 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
11016 * This function is called by the ct and els unsolicited event
11017 * handlers to get the buffer associated with the unsolicited
11020 * This function is called with no lock held.
11022 struct lpfc_dmabuf *
11023 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11026 struct lpfc_dmabuf *mp, *next_mp;
11027 struct list_head *slp = &pring->postbufq;
11029 /* Search postbufq, from the beginning, looking for a match on phys */
11030 spin_lock_irq(&phba->hbalock);
11031 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11032 if (mp->phys == phys) {
11033 list_del_init(&mp->list);
11034 pring->postbufq_cnt--;
11035 spin_unlock_irq(&phba->hbalock);
11040 spin_unlock_irq(&phba->hbalock);
11041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11042 "0410 Cannot find virtual addr for mapped buf on "
11043 "ring %d Data x%llx x%p x%p x%x\n",
11044 pring->ringno, (unsigned long long)phys,
11045 slp->next, slp->prev, pring->postbufq_cnt);
11050 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11051 * @phba: Pointer to HBA context object.
11052 * @cmdiocb: Pointer to driver command iocb object.
11053 * @rspiocb: Pointer to driver response iocb object.
11055 * This function is the completion handler for the abort iocbs for
11056 * ELS commands. This function is called from the ELS ring event
11057 * handler with no lock held. This function frees memory resources
11058 * associated with the abort iocb.
11061 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11062 struct lpfc_iocbq *rspiocb)
11064 IOCB_t *irsp = &rspiocb->iocb;
11065 uint16_t abort_iotag, abort_context;
11066 struct lpfc_iocbq *abort_iocb = NULL;
11068 if (irsp->ulpStatus) {
11071 * Assume that the port already completed and returned, or
11072 * will return the iocb. Just log the message.
11074 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11075 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11077 spin_lock_irq(&phba->hbalock);
11078 if (phba->sli_rev < LPFC_SLI_REV4) {
11079 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11080 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11081 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11082 spin_unlock_irq(&phba->hbalock);
11085 if (abort_iotag != 0 &&
11086 abort_iotag <= phba->sli.last_iotag)
11088 phba->sli.iocbq_lookup[abort_iotag];
11090 /* For sli4 the abort_tag is the XRI,
11091 * so the abort routine puts the iotag of the iocb
11092 * being aborted in the context field of the abort
11095 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11097 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11098 "0327 Cannot abort els iocb %p "
11099 "with tag %x context %x, abort status %x, "
11101 abort_iocb, abort_iotag, abort_context,
11102 irsp->ulpStatus, irsp->un.ulpWord[4]);
11104 spin_unlock_irq(&phba->hbalock);
11107 lpfc_sli_release_iocbq(phba, cmdiocb);
11112 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11113 * @phba: Pointer to HBA context object.
11114 * @cmdiocb: Pointer to driver command iocb object.
11115 * @rspiocb: Pointer to driver response iocb object.
11117 * The function is called from the SLI ring event handler with no
11118 * lock held. This function is the completion handler for ELS commands
11119 * which are aborted. The function frees memory resources used for
11120 * the aborted ELS commands.
11123 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11124 struct lpfc_iocbq *rspiocb)
11126 IOCB_t *irsp = &rspiocb->iocb;
11128 /* ELS cmd tag <ulpIoTag> completes */
11129 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11130 "0139 Ignoring ELS cmd tag x%x completion Data: "
11132 irsp->ulpIoTag, irsp->ulpStatus,
11133 irsp->un.ulpWord[4], irsp->ulpTimeout);
11134 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11135 lpfc_ct_free_iocb(phba, cmdiocb);
11137 lpfc_els_free_iocb(phba, cmdiocb);
11142 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11143 * @phba: Pointer to HBA context object.
11144 * @pring: Pointer to driver SLI ring object.
11145 * @cmdiocb: Pointer to driver command iocb object.
11147 * This function issues an abort iocb for the provided command iocb down to
11148 * the port. Unless the outstanding command iocb is itself an abort
11149 * request, this function issues the abort unconditionally. This function is
11150 * called with hbalock held. The function returns 0 when it fails due to
11151 * memory allocation failure or when the command iocb is an abort request.
11154 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11155 struct lpfc_iocbq *cmdiocb)
11157 struct lpfc_vport *vport = cmdiocb->vport;
11158 struct lpfc_iocbq *abtsiocbp;
11159 IOCB_t *icmd = NULL;
11160 IOCB_t *iabt = NULL;
11162 unsigned long iflags;
11163 struct lpfc_nodelist *ndlp;
11165 lockdep_assert_held(&phba->hbalock);
11168 * There are certain command types we don't want to abort. And we
11169 * don't want to abort commands that are already in the process of
11172 icmd = &cmdiocb->iocb;
11173 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11174 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11175 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11178 /* issue ABTS for this IOCB based on iotag */
11179 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11180 if (abtsiocbp == NULL)
11183 /* This signals the response to set the correct status
11184 * before calling the completion handler
11186 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11188 iabt = &abtsiocbp->iocb;
11189 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11190 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11191 if (phba->sli_rev == LPFC_SLI_REV4) {
11192 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11193 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11195 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11196 if (pring->ringno == LPFC_ELS_RING) {
11197 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11198 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11202 iabt->ulpClass = icmd->ulpClass;
11204 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11205 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11206 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11207 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11208 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11209 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11211 if (phba->link_state >= LPFC_LINK_UP)
11212 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11214 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11216 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11217 abtsiocbp->vport = vport;
11219 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11220 "0339 Abort xri x%x, original iotag x%x, "
11221 "abort cmd iotag x%x\n",
11222 iabt->un.acxri.abortIoTag,
11223 iabt->un.acxri.abortContextTag,
11226 if (phba->sli_rev == LPFC_SLI_REV4) {
11227 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11228 if (unlikely(pring == NULL))
11230 /* Note: both hbalock and ring_lock need to be set here */
11231 spin_lock_irqsave(&pring->ring_lock, iflags);
11232 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11234 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11236 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11241 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11244 * The caller of this routine should check for IOCB_ERROR
11245 * and handle it properly. This routine no longer removes the
11246 * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
11252 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11253 * @phba: Pointer to HBA context object.
11254 * @pring: Pointer to driver SLI ring object.
11255 * @cmdiocb: Pointer to driver command iocb object.
11257 * This function issues an abort iocb for the provided command iocb. In case
11258 * of unloading, the abort iocb will not be issued to commands on the ELS
11259 * ring. Instead, the completion callback of those commands is changed
11260 * so that nothing happens when they finish. This function is called with
11261 * hbalock held. The function returns 0 when the command iocb is an abort
11265 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11266 struct lpfc_iocbq *cmdiocb)
11268 struct lpfc_vport *vport = cmdiocb->vport;
11269 int retval = IOCB_ERROR;
11270 IOCB_t *icmd = NULL;
11272 lockdep_assert_held(&phba->hbalock);
11275 * There are certain command types we don't want to abort. And we
11276 * don't want to abort commands that are already in the process of
11279 icmd = &cmdiocb->iocb;
11280 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11281 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11282 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11286 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11287 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11289 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11290 goto abort_iotag_exit;
11294 * If we're unloading, don't abort iocb on the ELS ring, but change
11295 * the callback so that nothing happens when it finishes.
11297 if ((vport->load_flag & FC_UNLOADING) &&
11298 (pring->ringno == LPFC_ELS_RING)) {
11299 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11300 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11302 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11303 goto abort_iotag_exit;
11306 /* Now, we try to issue the abort to the cmdiocb out */
11307 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11311 * The caller of this routine should check for IOCB_ERROR
11312 * and handle it properly. This routine no longer removes the
11313 * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
11319 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
11320 * @phba: Pointer to HBA context object.
11321 * @pring: Pointer to driver SLI ring object.
11322 * @cmdiocb: Pointer to driver command iocb object.
11324 * This function issues an abort iocb for the provided command iocb down to
11325 * the port. Unless the outstanding command iocb is itself an abort
11326 * request, this function issues the abort unconditionally. This function is
11327 * called with hbalock held. The function returns 0 when it fails due to
11328 * memory allocation failure or when the command iocb is an abort request.
11331 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11332 struct lpfc_iocbq *cmdiocb)
11334 struct lpfc_vport *vport = cmdiocb->vport;
11335 struct lpfc_iocbq *abtsiocbp;
11336 union lpfc_wqe128 *abts_wqe;
11340 * There are certain command types we don't want to abort. And we
11341 * don't want to abort commands that are already in the process of
11344 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
11345 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
11346 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11349 /* issue ABTS for this io based on iotag */
11350 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11351 if (abtsiocbp == NULL)
11354 /* This signals the response to set the correct status
11355 * before calling the completion handler
11357 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11359 /* Complete prepping the abort wqe and issue to the FW. */
11360 abts_wqe = &abtsiocbp->wqe;
11362 /* Clear any stale WQE contents */
11363 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
11364 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
11367 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
11368 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
11369 cmdiocb->iocb.ulpClass);
11371 /* word 8 - tell the FW to abort the IO associated with this
11372 * outstanding exchange ID.
11374 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
11376 /* word 9 - this is the iotag for the abts_wqe completion. */
11377 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
11381 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
11382 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
11385 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11386 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
11387 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
11389 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11390 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
11391 abtsiocbp->vport = vport;
11392 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
11393 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
11395 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11396 "6147 Failed abts issue_wqe with status x%x "
11398 retval, cmdiocb->sli4_xritag);
11399 lpfc_sli_release_iocbq(phba, abtsiocbp);
11403 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11404 "6148 Drv Abort NVME Request Issued for "
11405 "ox_id x%x on reqtag x%x\n",
11406 cmdiocb->sli4_xritag,
11413 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11414 * @phba: pointer to lpfc HBA data structure.
11416 * This routine will abort all pending and outstanding iocbs to an HBA.
11419 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11421 struct lpfc_sli *psli = &phba->sli;
11422 struct lpfc_sli_ring *pring;
11423 struct lpfc_queue *qp = NULL;
11426 if (phba->sli_rev != LPFC_SLI_REV4) {
11427 for (i = 0; i < psli->num_rings; i++) {
11428 pring = &psli->sli3_ring[i];
11429 lpfc_sli_abort_iocb_ring(phba, pring);
11433 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11437 lpfc_sli_abort_iocb_ring(phba, pring);
11442 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11443 * @iocbq: Pointer to driver iocb object.
11444 * @vport: Pointer to driver virtual port object.
11445 * @tgt_id: SCSI ID of the target.
11446 * @lun_id: LUN ID of the scsi device.
11447 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11449 * This function acts as an iocb filter for functions which abort or count
11450 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11451 * 0 if the filtering criteria are met for the given iocb and will return
11452 * 1 if the filtering criteria are not met.
11453 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11454 * given iocb is for the SCSI device specified by vport, tgt_id and
11455 * lun_id parameters.
11456 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11457 * given iocb is for the SCSI target specified by vport and tgt_id
11459 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11460 * given iocb is for the SCSI host associated with the given vport.
11461 * This function is called with no locks held.
11464 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11465 uint16_t tgt_id, uint64_t lun_id,
11466 lpfc_ctx_cmd ctx_cmd)
11468 struct lpfc_scsi_buf *lpfc_cmd;
11471 if (iocbq->vport != vport)
11474 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11475 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11478 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11480 if (lpfc_cmd->pCmd == NULL)
11485 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11486 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11487 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11491 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11492 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11495 case LPFC_CTX_HOST:
11499 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11500 __func__, ctx_cmd);
11508 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11509 * @vport: Pointer to virtual port.
11510 * @tgt_id: SCSI ID of the target.
11511 * @lun_id: LUN ID of the scsi device.
11512 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11514 * This function returns the number of FCP commands pending for the vport.
11515 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
11516 * commands pending on the vport associated with the SCSI device specified
11517 * by the tgt_id and lun_id parameters.
11518 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
11519 * commands pending on the vport associated with the SCSI target specified
11520 * by the tgt_id parameter.
11521 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
11522 * commands pending on the vport.
11523 * This function returns the number of iocbs which satisfy the filter.
11524 * This function is called without any lock held.
11527 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11528 lpfc_ctx_cmd ctx_cmd)
11530 struct lpfc_hba *phba = vport->phba;
11531 struct lpfc_iocbq *iocbq;
11534 spin_lock_irq(&phba->hbalock);
11535 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11536 iocbq = phba->sli.iocbq_lookup[i];
11538 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11542 spin_unlock_irq(&phba->hbalock);
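/*
 * Usage sketch (assumed caller): reset handlers typically poll this
 * count to wait for outstanding FCP commands to drain:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	while (cnt && wait_cnt--) {
 *		msleep(100);
 *		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	}
 */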
11548 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11549 * @phba: Pointer to HBA context object
11550 * @cmdiocb: Pointer to command iocb object.
11551 * @rspiocb: Pointer to response iocb object.
11553 * This function is called when an aborted FCP iocb completes. This
11554 * function is called by the ring event handler with no lock held.
11555 * This function frees the iocb.
11558 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11559 struct lpfc_iocbq *rspiocb)
11561 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11562 "3096 ABORT_XRI_CN completing on rpi x%x "
11563 "original iotag x%x, abort cmd iotag x%x "
11564 "status 0x%x, reason 0x%x\n",
11565 cmdiocb->iocb.un.acxri.abortContextTag,
11566 cmdiocb->iocb.un.acxri.abortIoTag,
11567 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11568 rspiocb->iocb.un.ulpWord[4]);
11569 lpfc_sli_release_iocbq(phba, cmdiocb);
11574 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11575 * @vport: Pointer to virtual port.
11576 * @pring: Pointer to driver SLI ring object.
11577 * @tgt_id: SCSI ID of the target.
11578 * @lun_id: LUN ID of the scsi device.
11579 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11581 * This function sends an abort command for every SCSI command
11582 * associated with the given virtual port pending on the ring
11583 * filtered by lpfc_sli_validate_fcp_iocb function.
11584 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11585 * FCP iocbs associated with lun specified by tgt_id and lun_id
11587 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11588 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11589 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11590 * FCP iocbs associated with virtual port.
11591 * This function returns the number of iocbs it failed to abort.
11592 * This function is called with no locks held.
11595 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11596 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11598 struct lpfc_hba *phba = vport->phba;
11599 struct lpfc_iocbq *iocbq;
11600 struct lpfc_iocbq *abtsiocb;
11601 struct lpfc_sli_ring *pring_s4;
11602 IOCB_t *cmd = NULL;
11603 int errcnt = 0, ret_val = 0;
11606 /* all I/Os are in process of being flushed */
11607 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11610 for (i = 1; i <= phba->sli.last_iotag; i++) {
11611 iocbq = phba->sli.iocbq_lookup[i];
11613 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11618 * If the iocbq is already being aborted, don't take a second
11619 * action, but do count it.
11621 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11624 /* issue ABTS for this IOCB based on iotag */
11625 abtsiocb = lpfc_sli_get_iocbq(phba);
11626 if (abtsiocb == NULL) {
11631 /* indicate the IO is being aborted by the driver. */
11632 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11634 cmd = &iocbq->iocb;
11635 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11636 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11637 if (phba->sli_rev == LPFC_SLI_REV4)
11638 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11640 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11641 abtsiocb->iocb.ulpLe = 1;
11642 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11643 abtsiocb->vport = vport;
11645 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11646 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11647 if (iocbq->iocb_flag & LPFC_IO_FCP)
11648 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11649 if (iocbq->iocb_flag & LPFC_IO_FOF)
11650 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11652 if (lpfc_is_link_up(phba))
11653 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11655 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11657 /* Setup callback routine and issue the command. */
11658 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11659 if (phba->sli_rev == LPFC_SLI_REV4) {
11660 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11663 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11666 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11668 if (ret_val == IOCB_ERROR) {
11669 lpfc_sli_release_iocbq(phba, abtsiocb);
/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters
 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns the number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4;

	spin_lock_irqsave(&phba->hbalock, iflags);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (abtsiocbq == NULL)
			continue;

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		ndlp = lpfc_cmd->rdata->pnode;

		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
			if (!pring_s4)
				continue;
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock(&pring_s4->ring_lock);
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock(&pring_s4->ring_lock);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return sum;
}

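/*
 * Illustrative sketch, not part of the driver: after a target reset TMF
 * completes, a caller can use lpfc_sli_abort_taskmgmt() to ABTS whatever is
 * still outstanding for that target. "lpfc_example_post_tmf_abort" is a
 * hypothetical name and "xxxx" is a placeholder log number; note that,
 * unlike lpfc_sli_abort_iocb(), the return value here counts the iocbs
 * that WERE aborted.
 */
static void __maybe_unused
lpfc_example_post_tmf_abort(struct lpfc_vport *vport, uint16_t tgt_id)
{
	struct lpfc_hba *phba = vport->phba;
	int aborted;

	aborted = lpfc_sli_abort_taskmgmt(vport,
					  &phba->sli.sli3_ring[LPFC_FCP_RING],
					  tgt_id, 0, LPFC_CTX_TGT);
	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"xxxx sketch: issued %d ABTS after TMF\n", aborted);
}
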
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb. If a time out
		 * completion handler has been supplied, call it. Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
					cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}

/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

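/*
 * Illustrative sketch, not part of the driver: lpfc_chk_iocb_flg() exists
 * so a sleeping submitter can test iocb_flag under hbalock from inside a
 * wait_event_timeout() condition, exactly as lpfc_sli_issue_iocb_wait()
 * does below. "lpfc_example_wait_for_wake" is a hypothetical wrapper.
 */
static long __maybe_unused
lpfc_example_wait_for_wake(struct lpfc_hba *phba, struct lpfc_iocbq *piocb,
			   wait_queue_head_t *done_q, long timeout_req)
{
	/* returns remaining jiffies, or 0 if the flag never got set */
	return wait_event_timeout(*done_q,
				  lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				  timeout_req);
}
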
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or it's an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out. Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}

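/*
 * Illustrative sketch, not part of the driver: synchronous submission with
 * a caller-owned response iocb. "lpfc_example_issue_sync" is hypothetical;
 * the ownership rules shown (context2 must be free on entry, and the
 * command iocb must NOT be freed on IOCB_TIMEDOUT) come straight from the
 * documentation of lpfc_sli_issue_iocb_wait() above.
 */
static int __maybe_unused
lpfc_example_issue_sync(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq)
{
	struct lpfc_iocbq *rspiocbq;
	int rc;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq)
		return -ENOMEM;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				      rspiocbq, phba->fc_ratov * 2);
	if (rc == IOCB_TIMEDOUT) {
		/* cmdiocbq is now owned by the timeout completion path */
		lpfc_sli_release_iocbq(phba, rspiocbq);
		return -ETIMEDOUT;
	}

	/* IOCB_SUCCESS only means "sent and completed": check ulpStatus */
	if (rc == IOCB_SUCCESS && rspiocbq->iocb.ulpStatus != IOSTAT_SUCCESS)
		rc = IOCB_ERROR;

	lpfc_sli_release_iocbq(phba, rspiocbq);
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}
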
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using a
 * non-interruptible wait. If the command times out, MBX_TIMEOUT
 * is returned to the caller and the caller should not free the
 * mailbox resources, as ownership passes to the default
 * completion handler.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* setup context3 field to pass wait_queue pointer to wake function */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}

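/*
 * Illustrative sketch, not part of the driver: the canonical synchronous
 * mailbox pattern. "lpfc_example_read_rev_sync" is a hypothetical helper;
 * lpfc_read_rev() and the mailbox mempool are real driver facilities. The
 * mailbox is freed only when the command did not time out, because on
 * MBX_TIMEOUT ownership stays with the completion handler.
 */
static int __maybe_unused
lpfc_example_read_rev_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);	/* build the READ_REV command */
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
	if (rc != MBX_TIMEOUT)
		mempool_free(pmb, phba->mbox_mem_pool);

	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
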
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a blocked state to
 * prevent asynchronous mailbox commands from being issued off the pending
 * mailbox command queue. If the mailbox command sub-system shutdown is due
 * to HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down the mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command
				 */
				break;
		}
	} else {
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();
	}

	lpfc_sli_mbox_sys_flush(phba);
}

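/*
 * Illustrative sketch, not part of the driver: offline/reset paths pick the
 * graceful variant, while EEH/ERATT error paths pass LPFC_MBX_NO_WAIT so
 * the sub-system is flushed immediately. "lpfc_example_prep_offline" is a
 * hypothetical name.
 */
static void __maybe_unused
lpfc_example_prep_offline(struct lpfc_hba *phba, bool hba_error)
{
	lpfc_sli_mbox_sys_shutdown(phba, hba_error ? LPFC_MBX_NO_WAIT
						   : LPFC_MBX_WAIT);
}
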
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}

/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}

/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}

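/*
 * Illustrative sketch, not part of the driver: how a periodic timer can use
 * lpfc_sli_check_eratt(). This is a simplified rendition of what the
 * driver's own eratt poll timer does; "lpfc_example_eratt_poll" is a
 * hypothetical callback and assumes the timer was armed with timer_setup().
 */
static void __maybe_unused
lpfc_example_eratt_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);

	/* wake the worker only when an error attention is really pending */
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);

	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
}
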
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 **/
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}

/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			} else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				} else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring:   pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if there is a deferred error condition
			 * active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->ctx_buf)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->ctx_buf,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->ctx_buf);
						ndlp = (struct lpfc_nodelist *)
							pmb->ctx_ndlp;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->ctx_buf = mp;
						pmb->ctx_ndlp = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have "
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
					LOG_SLI, "0349 rc should be "
					"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */

/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
				&phba->sli.sli3_ring[LPFC_EXTRA_RING],
				status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */

/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */

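/*
 * Illustrative sketch, not part of the driver: in INTx or single-MSI mode
 * only the device-level handler above is registered, and it demultiplexes
 * into the slow-path and fast-path handlers itself. A hypothetical
 * registration would look like this; the real driver performs the
 * equivalent in its interrupt setup code.
 */
static int __maybe_unused
lpfc_example_register_intx(struct lpfc_hba *phba)
{
	return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			   IRQF_SHARED, LPFC_DRIVER_NAME, phba);
}
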
/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}

/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the response iocbq carrying the ELS WCQE.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}

inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}

/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
			/* Reg_LOGIN of dflt RPI was successful. Now let's get
			 * rid of the RPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	int fcp_txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			fcp_txcmplq_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles slow-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}

/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_FCP:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_NVME:
		/* Notify aborted XRI for NVME work queue */
		if (phba->nvmet_support)
			lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		else
			lpfc_sli4_nvme_xri_aborted(phba, wcqe);

		workposted = false;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}

#define FC_RCTL_MDS_DIAGS	0xF4

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}

13599 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13600 * @phba: Pointer to HBA context object.
13601 * @cq: Pointer to the completion queue.
13602 * @cqe: Pointer to a completion queue entry.
13604 * This routine processes a slow-path work-queue or receive-queue
13605 * completion queue entry.
13607 * Return: true if work posted to worker thread, otherwise false.
13610 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13611 struct lpfc_cqe *cqe)
13613 struct lpfc_cqe cqevt;
13614 bool workposted = false;
13616 /* Copy the work queue CQE and convert endian order if needed */
13617 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13619 /* Check the WCQE type and dispatch to the proper handler */
13620 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13621 case CQE_CODE_COMPL_WQE:
13622 /* Process the WQ/RQ complete event */
13623 phba->last_completion_time = jiffies;
13624 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13625 (struct lpfc_wcqe_complete *)&cqevt);
13627 case CQE_CODE_RELEASE_WQE:
13628 /* Process the WQ release event */
13629 lpfc_sli4_sp_handle_rel_wcqe(phba,
13630 (struct lpfc_wcqe_release *)&cqevt);
13632 case CQE_CODE_XRI_ABORTED:
13633 /* Process the WQ XRI abort event */
13634 phba->last_completion_time = jiffies;
13635 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13636 (struct sli4_wcqe_xri_aborted *)&cqevt);
13638 case CQE_CODE_RECEIVE:
13639 case CQE_CODE_RECEIVE_V1:
13640 /* Process the RQ event */
13641 phba->last_completion_time = jiffies;
13642 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13643 (struct lpfc_rcqe *)&cqevt);
13646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13647 "0388 Not a valid WCQE code: x%x\n",
13648 bf_get(lpfc_cqe_code, &cqevt));
13655 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13656 * @phba: Pointer to HBA context object.
13657 * @eqe: Pointer to slow-path event queue entry.
13658 * @speq: Pointer to the slow-path event queue.
13659 * This routine processes an event queue entry from the slow-path event
13660 * queue. It checks the MajorCode and MinorCode to determine whether this
13661 * is a completion event on a completion queue; if not, an error is
13662 * logged and the routine returns. Otherwise, it locates the
13663 * corresponding completion queue and schedules a work item to drain
13664 * and rearm that completion queue.
13668 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13669 struct lpfc_queue *speq)
13671 struct lpfc_queue *cq = NULL, *childq;
13674 /* Get the reference to the corresponding CQ */
13675 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13677 list_for_each_entry(childq, &speq->child_list, list) {
13678 if (childq->queue_id == cqid) {
13683 if (unlikely(!cq)) {
13684 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13685 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13686 "0365 Slow-path CQ identifier "
13687 "(%d) does not exist\n", cqid);
13691 /* Save EQ associated with this CQ */
13692 cq->assoc_qp = speq;
13694 if (!queue_work(phba->wq, &cq->spwork))
13695 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13696 "0390 Cannot schedule soft IRQ "
13697 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13698 cqid, cq->queue_id, smp_processor_id());
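/*
 * Editor's note: the lookup above walks the EQ's child list to translate
 * the CQID carried in the EQE into a driver queue object. A minimal
 * sketch of that pattern, with the hypothetical struct my_q standing in
 * for struct lpfc_queue:
 */
#if 0	/* illustrative sketch only */
struct my_q {
	struct list_head list;		/* linkage on the parent's child_list */
	struct list_head child_list;	/* queues hanging off this one */
	u32 queue_id;
};

static struct my_q *my_find_child(struct my_q *parent, u32 qid)
{
	struct my_q *childq;

	list_for_each_entry(childq, &parent->child_list, list)
		if (childq->queue_id == qid)
			return childq;
	return NULL;			/* caller logs "does not exist" */
}
#endif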
13702 * lpfc_sli4_sp_process_cq - Drain a slow-path completion queue
13703 * @work: Pointer to the work_struct embedded in the completion queue.
13705 * This routine runs from the driver workqueue, scheduled by
13706 * lpfc_sli4_sp_handle_eqe(). Based on the queue type it dispatches each
13707 * completion queue entry to the proper handler, tracks the number of
13708 * entries processed, rearms the completion queue, and wakes the worker
13709 * thread if any handler posted work for it.
13714 lpfc_sli4_sp_process_cq(struct work_struct *work)
13716 struct lpfc_queue *cq =
13717 container_of(work, struct lpfc_queue, spwork);
13718 struct lpfc_hba *phba = cq->phba;
13719 struct lpfc_cqe *cqe;
13720 bool workposted = false;
13723 /* Process all the entries to the CQ */
13724 switch (cq->type) {
13726 while ((cqe = lpfc_sli4_cq_get(cq))) {
13727 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13728 if (!(++ccount % cq->entry_repost))
13734 while ((cqe = lpfc_sli4_cq_get(cq))) {
13735 if (cq->subtype == LPFC_FCP ||
13736 cq->subtype == LPFC_NVME) {
13737 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13738 if (phba->ktime_on)
13739 cq->isr_timestamp = ktime_get_ns();
13741 cq->isr_timestamp = 0;
13743 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13746 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13749 if (!(++ccount % cq->entry_repost))
13753 /* Track the max number of CQEs processed in 1 EQ */
13754 if (ccount > cq->CQ_max_cqe)
13755 cq->CQ_max_cqe = ccount;
13758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13759 "0370 Invalid completion queue type (%d)\n",
13764 /* Catch the no cq entry condition, log an error */
13765 if (unlikely(ccount == 0))
13766 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13767 "0371 No entry from the CQ: identifier "
13768 "(x%x), type (%d)\n", cq->queue_id, cq->type);
13770 /* In any case, flush and re-arm the CQ */
13771 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
13773 /* wake up worker thread if there is work to be done */
13775 lpfc_worker_wake_up(phba);
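/*
 * Editor's note: the loops above consume CQEs in batches; every
 * cq->entry_repost entries the queue is released back to the port
 * without re-arming, and only the final release re-arms it. A sketch of
 * that loop shape, with my_cq_get()/my_cq_release() as hypothetical
 * stand-ins for lpfc_sli4_cq_get() and the sli4_cq_release method:
 */
#if 0	/* illustrative sketch only */
static void my_drain_cq(struct my_cq *cq)
{
	struct my_cqe *cqe;
	int count = 0;

	while ((cqe = my_cq_get(cq))) {
		my_handle_cqe(cq, cqe);
		/* Periodically free consumed entries, but do not re-arm. */
		if (!(++count % cq->entry_repost))
			my_cq_release(cq, MY_QUEUE_NOARM);
	}
	/* Final release re-arms the CQ so the port raises the next EQE. */
	my_cq_release(cq, MY_QUEUE_REARM);
}
#endif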
13779 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13780 * @phba: Pointer to HBA context object.
13781 * @cq: Pointer to associated CQ
13782 * @wcqe: Pointer to work-queue completion queue entry.
13784 * This routine processes a fast-path work-queue completion entry from a
13785 * fast-path event queue for an FCP command response completion.
13788 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13789 struct lpfc_wcqe_complete *wcqe)
13791 struct lpfc_sli_ring *pring = cq->pring;
13792 struct lpfc_iocbq *cmdiocbq;
13793 struct lpfc_iocbq irspiocbq;
13794 unsigned long iflags;
13796 /* Check for response status */
13797 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13798 /* If resource errors reported from HBA, reduce queue
13799 * depth of the SCSI device.
13801 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13802 IOSTAT_LOCAL_REJECT)) &&
13803 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13804 IOERR_NO_RESOURCES))
13805 phba->lpfc_rampdown_queue_depth(phba);
13807 /* Log the error status */
13808 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13809 "0373 FCP CQE error: status=x%x: "
13810 "CQE: %08x %08x %08x %08x\n",
13811 bf_get(lpfc_wcqe_c_status, wcqe),
13812 wcqe->word0, wcqe->total_data_placed,
13813 wcqe->parameter, wcqe->word3);
13816 /* Look up the FCP command IOCB and create pseudo response IOCB */
13817 spin_lock_irqsave(&pring->ring_lock, iflags);
13818 pring->stats.iocb_event++;
13819 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13820 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13821 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13822 if (unlikely(!cmdiocbq)) {
13823 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13824 "0374 FCP complete with no corresponding "
13825 "cmdiocb: iotag (%d)\n",
13826 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13829 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13830 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13832 if (cmdiocbq->iocb_cmpl == NULL) {
13833 if (cmdiocbq->wqe_cmpl) {
13834 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13835 spin_lock_irqsave(&phba->hbalock, iflags);
13836 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13837 spin_unlock_irqrestore(&phba->hbalock, iflags);
13840 /* Pass the cmd_iocb and the wcqe to the upper layer */
13841 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13844 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13845 "0375 FCP cmdiocb not callback function "
13847 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13851 /* Fake the irspiocb and copy necessary response information */
13852 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13854 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13855 spin_lock_irqsave(&phba->hbalock, iflags);
13856 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13857 spin_unlock_irqrestore(&phba->hbalock, iflags);
13860 /* Pass the cmd_iocb and the rsp state to the upper layer */
13861 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
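/*
 * Editor's note: the status decode near the top of this routine decides
 * whether to ramp down the SCSI queue depth. A compact restatement of
 * that predicate (using the IOSTAT/IOERR macros already referenced
 * above; my_should_rampdown itself is hypothetical):
 */
#if 0	/* illustrative sketch only */
static bool my_should_rampdown(struct lpfc_wcqe_complete *wcqe)
{
	/* Only a LOCAL_REJECT whose parameter says "no resources" counts. */
	return bf_get(lpfc_wcqe_c_status, wcqe) == IOSTAT_LOCAL_REJECT &&
	       (wcqe->parameter & IOERR_PARAM_MASK) == IOERR_NO_RESOURCES;
}
#endif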
13865 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13866 * @phba: Pointer to HBA context object.
13867 * @cq: Pointer to completion queue.
13868 * @wcqe: Pointer to work-queue completion queue entry.
13870 * This routine handles a fast-path WQ entry consumed event by invoking the
13871 * proper WQ release routine for the work queue that posted the entry.
13874 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13875 struct lpfc_wcqe_release *wcqe)
13877 struct lpfc_queue *childwq;
13878 bool wqid_matched = false;
13881 /* Check for fast-path FCP work queue release */
13882 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13883 list_for_each_entry(childwq, &cq->child_list, list) {
13884 if (childwq->queue_id == hba_wqid) {
13885 lpfc_sli4_wq_release(childwq,
13886 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13887 if (childwq->q_flag & HBA_NVMET_WQFULL)
13888 lpfc_nvmet_wqfull_process(phba, childwq);
13889 wqid_matched = true;
13893 /* Report warning log message if no match found */
13894 if (wqid_matched != true)
13895 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13896 "2580 Fast-path wqe consume event carries "
13897 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13901 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13902 * @phba: Pointer to HBA context object.
13903 * @cq: Pointer to the completion queue.
13904 * @rcqe: Pointer to receive-queue completion queue entry.
13905 * This routine processes a receive-queue completion queue entry.
13907 * Return: true if work posted to worker thread, otherwise false.
13910 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13911 struct lpfc_rcqe *rcqe)
13913 bool workposted = false;
13914 struct lpfc_queue *hrq;
13915 struct lpfc_queue *drq;
13916 struct rqb_dmabuf *dma_buf;
13917 struct fc_frame_header *fc_hdr;
13918 struct lpfc_nvmet_tgtport *tgtp;
13919 uint32_t status, rq_id;
13920 unsigned long iflags;
13921 uint32_t fctl, idx;
13923 if ((phba->nvmet_support == 0) ||
13924 (phba->sli4_hba.nvmet_cqset == NULL))
13927 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13928 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13929 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13931 /* sanity check on queue memory */
13932 if (unlikely(!hrq) || unlikely(!drq))
13935 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13936 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13938 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13940 if ((phba->nvmet_support == 0) ||
13941 (rq_id != hrq->queue_id))
13944 status = bf_get(lpfc_rcqe_status, rcqe);
13946 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13947 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13948 "6126 Receive Frame Truncated!!\n");
13950 case FC_STATUS_RQ_SUCCESS:
13951 spin_lock_irqsave(&phba->hbalock, iflags);
13952 lpfc_sli4_rq_release(hrq, drq);
13953 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13955 hrq->RQ_no_buf_found++;
13956 spin_unlock_irqrestore(&phba->hbalock, iflags);
13959 spin_unlock_irqrestore(&phba->hbalock, iflags);
13961 hrq->RQ_buf_posted--;
13962 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13964 /* Just some basic sanity checks on FCP Command frame */
13965 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13966 fc_hdr->fh_f_ctl[1] << 8 |
13967 fc_hdr->fh_f_ctl[2]);
13969 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13970 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13971 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13974 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13975 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13976 lpfc_nvmet_unsol_fcp_event(
13977 phba, idx, dma_buf,
13978 cq->isr_timestamp);
13982 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13984 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13985 if (phba->nvmet_support) {
13986 tgtp = phba->targetport->private;
13987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13988 "6401 RQE Error x%x, posted %d err_cnt "
13990 status, hrq->RQ_buf_posted,
13991 hrq->RQ_no_posted_buf,
13992 atomic_read(&tgtp->rcv_fcp_cmd_in),
13993 atomic_read(&tgtp->rcv_fcp_cmd_out),
13994 atomic_read(&tgtp->xmt_fcp_release));
13998 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13999 hrq->RQ_no_posted_buf++;
14000 /* Post more buffers if possible */
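/*
 * Editor's note: the FCP-command sanity check above rebuilds the 24-bit
 * F_CTL field from its three header bytes and requires the
 * first/end/sequence-initiative bits of a single-frame sequence. The
 * same test as a small helper (my_single_frame_seq is hypothetical; the
 * FC_FC_* bits come from <scsi/fc/fc_fs.h>):
 */
#if 0	/* illustrative sketch only */
static bool my_single_frame_seq(const struct fc_frame_header *fh)
{
	u32 fctl = (fh->fh_f_ctl[0] << 16) |
		   (fh->fh_f_ctl[1] << 8) |
		    fh->fh_f_ctl[2];
	u32 want = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;

	/* 0 is 0 in either byte order, so fh_seq_cnt needs no swap. */
	return (fctl & want) == want && fh->fh_seq_cnt == 0;
}
#endif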
14008 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14009 * @phba: Pointer to HBA context object.
14010 * @cq: Pointer to the completion queue.
14011 * @cqe: Pointer to fast-path completion queue entry.
14012 * This routine processes a fast-path completion queue entry from a
14013 * fast-path event queue and dispatches it based on the CQE code.
14016 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14017 struct lpfc_cqe *cqe)
14019 struct lpfc_wcqe_release wcqe;
14020 bool workposted = false;
14022 /* Copy the work queue CQE and convert endian order if needed */
14023 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14025 /* Check the WCQE type and dispatch to the proper handler */
14026 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14027 case CQE_CODE_COMPL_WQE:
14028 case CQE_CODE_NVME_ERSP:
14030 /* Process the WQ complete event */
14031 phba->last_completion_time = jiffies;
14032 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
14033 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14034 (struct lpfc_wcqe_complete *)&wcqe);
14035 if (cq->subtype == LPFC_NVME_LS)
14036 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14037 (struct lpfc_wcqe_complete *)&wcqe);
14039 case CQE_CODE_RELEASE_WQE:
14040 cq->CQ_release_wqe++;
14041 /* Process the WQ release event */
14042 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14043 (struct lpfc_wcqe_release *)&wcqe);
14045 case CQE_CODE_XRI_ABORTED:
14046 cq->CQ_xri_aborted++;
14047 /* Process the WQ XRI abort event */
14048 phba->last_completion_time = jiffies;
14049 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14050 (struct sli4_wcqe_xri_aborted *)&wcqe);
14052 case CQE_CODE_RECEIVE_V1:
14053 case CQE_CODE_RECEIVE:
14054 phba->last_completion_time = jiffies;
14055 if (cq->subtype == LPFC_NVMET) {
14056 workposted = lpfc_sli4_nvmet_handle_rcqe(
14057 phba, cq, (struct lpfc_rcqe *)&wcqe);
14061 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14062 "0144 Not a valid CQE code: x%x\n",
14063 bf_get(lpfc_wcqe_c_code, &wcqe));
14070 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14071 * @phba: Pointer to HBA context object.
14072 * @eqe: Pointer to fast-path event queue entry.
14073 * @qidx: Index into the fast-path EQ array.
14074 * This routine processes an event queue entry from the fast-path event
14075 * queue. It checks the MajorCode and MinorCode to determine whether this
14076 * is a completion event on a completion queue; if not, an error is
14077 * logged and the routine returns. Otherwise, it locates the
14078 * corresponding completion queue and schedules a work item to drain
14079 * and rearm that completion queue.
14082 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14085 struct lpfc_queue *cq = NULL;
14088 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14089 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14090 "0366 Not a valid completion "
14091 "event: majorcode=x%x, minorcode=x%x\n",
14092 bf_get_le32(lpfc_eqe_major_code, eqe),
14093 bf_get_le32(lpfc_eqe_minor_code, eqe));
14097 /* Get the reference to the corresponding CQ */
14098 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14100 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14101 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14102 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14103 /* Process NVMET unsol rcv */
14104 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14109 if (phba->sli4_hba.nvme_cq_map &&
14110 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
14111 /* Process NVME / NVMET command completion */
14112 cq = phba->sli4_hba.nvme_cq[qidx];
14116 if (phba->sli4_hba.fcp_cq_map &&
14117 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
14118 /* Process FCP command completion */
14119 cq = phba->sli4_hba.fcp_cq[qidx];
14123 if (phba->sli4_hba.nvmels_cq &&
14124 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14125 /* Process NVME unsol rcv */
14126 cq = phba->sli4_hba.nvmels_cq;
14129 /* Otherwise this is a Slow path event */
14131 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
14136 if (unlikely(cqid != cq->queue_id)) {
14137 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14138 "0368 Miss-matched fast-path completion "
14139 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14140 cqid, cq->queue_id);
14144 /* Save EQ associated with this CQ */
14145 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
14147 if (!queue_work(phba->wq, &cq->irqwork))
14148 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14149 "0363 Cannot schedule soft IRQ "
14150 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14151 cqid, cq->queue_id, smp_processor_id());
14155 * lpfc_sli4_hba_process_cq - Drain a fast-path completion queue
14156 * @work: Pointer to the work_struct embedded in the completion queue.
14159 * This routine runs from the driver workqueue, scheduled by
14160 * lpfc_sli4_hba_handle_eqe(). It dispatches each completion queue entry
14161 * to lpfc_sli4_fp_handle_cqe(), tracks the number of entries processed,
14162 * rearms the completion queue, and wakes the worker thread if any
14163 * handler posted work for it.
14167 lpfc_sli4_hba_process_cq(struct work_struct *work)
14169 struct lpfc_queue *cq =
14170 container_of(work, struct lpfc_queue, irqwork);
14171 struct lpfc_hba *phba = cq->phba;
14172 struct lpfc_cqe *cqe;
14173 bool workposted = false;
14176 /* Process all the entries to the CQ */
14177 while ((cqe = lpfc_sli4_cq_get(cq))) {
14178 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14179 if (phba->ktime_on)
14180 cq->isr_timestamp = ktime_get_ns();
14182 cq->isr_timestamp = 0;
14184 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
14185 if (!(++ccount % cq->entry_repost))
14189 /* Track the max number of CQEs processed in 1 EQ */
14190 if (ccount > cq->CQ_max_cqe)
14191 cq->CQ_max_cqe = ccount;
14192 cq->assoc_qp->EQ_cqe_cnt += ccount;
14194 /* Catch the no cq entry condition */
14195 if (unlikely(ccount == 0))
14196 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14197 "0369 No entry from fast-path completion "
14198 "queue fcpcqid=%d\n", cq->queue_id);
14200 /* In any case, flush and re-arm the CQ */
14201 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
14203 /* wake up worker thread if there is work to be done */
14205 lpfc_worker_wake_up(phba);
14209 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
14211 struct lpfc_eqe *eqe;
14213 /* walk all the EQ entries and drop on the floor */
14214 while ((eqe = lpfc_sli4_eq_get(eq)))
14217 /* Clear and re-arm the EQ */
14218 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
14223 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
14225 * @phba: Pointer to HBA context object.
14226 * @eqe: Pointer to fast-path event queue entry.
14228 * This routine processes an event queue entry from the Flash Optimized
14229 * Fabric event queue. It checks the MajorCode and MinorCode to determine
14230 * whether this is a completion event on a completion queue; if not, an
14231 * error is logged and the routine returns. Otherwise, it locates the
14232 * corresponding completion queue and schedules a work item to drain and
14233 * rearm that completion queue.
14236 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
14238 struct lpfc_queue *cq;
14241 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14242 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14243 "9147 Not a valid completion "
14244 "event: majorcode=x%x, minorcode=x%x\n",
14245 bf_get_le32(lpfc_eqe_major_code, eqe),
14246 bf_get_le32(lpfc_eqe_minor_code, eqe));
14250 /* Get the reference to the corresponding CQ */
14251 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14253 /* Next check for OAS */
14254 cq = phba->sli4_hba.oas_cq;
14255 if (unlikely(!cq)) {
14256 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14258 "9148 OAS completion queue "
14259 "does not exist\n");
14263 if (unlikely(cqid != cq->queue_id)) {
14264 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14265 "9149 Miss-matched fast-path compl "
14266 "queue id: eqcqid=%d, fcpcqid=%d\n",
14267 cqid, cq->queue_id);
14271 /* Save EQ associated with this CQ */
14272 cq->assoc_qp = phba->sli4_hba.fof_eq;
14274 /* CQ work will be processed on CPU affinitized to this IRQ */
14275 if (!queue_work(phba->wq, &cq->irqwork))
14276 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14277 "0367 Cannot schedule soft IRQ "
14278 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14279 cqid, cq->queue_id, smp_processor_id());
14283 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
14284 * @irq: Interrupt number.
14285 * @dev_id: The device context pointer.
14287 * This function is directly called from the PCI layer as an interrupt
14288 * service routine when device with SLI-4 interface spec is enabled with
14289 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
14290 * IOCB ring event in the HBA. However, when the device is enabled with either
14291 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14292 * device-level interrupt handler. When the PCI slot is in error recovery
14293 * or the HBA is undergoing initialization, the interrupt handler will not
14294 * process the interrupt. Flash Optimized Fabric ring events are handled in
14295 * the interrupt context. This function is called without any lock held.
14296 * It gets the hbalock to access and update SLI data structures. Note that
14297 * the EQ and CQ are mapped one-to-one, so the EQ index is
14298 * equal to the CQ index.
14300 * This function returns IRQ_HANDLED when interrupt is handled else it
14301 * returns IRQ_NONE.
14304 lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
14306 struct lpfc_hba *phba;
14307 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14308 struct lpfc_queue *eq;
14309 struct lpfc_eqe *eqe;
14310 unsigned long iflag;
14313 /* Get the driver's phba structure from the dev_id */
14314 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14315 phba = hba_eq_hdl->phba;
14317 if (unlikely(!phba))
14320 /* Get to the EQ struct associated with this vector */
14321 eq = phba->sli4_hba.fof_eq;
14325 /* Check device state for handling interrupt */
14326 if (unlikely(lpfc_intr_state_check(phba))) {
14327 /* Check again for link_state with lock held */
14328 spin_lock_irqsave(&phba->hbalock, iflag);
14329 if (phba->link_state < LPFC_LINK_DOWN)
14330 /* Flush, clear interrupt, and rearm the EQ */
14331 lpfc_sli4_eq_flush(phba, eq);
14332 spin_unlock_irqrestore(&phba->hbalock, iflag);
14337 * Process all the events on the fast-path EQ
14339 while ((eqe = lpfc_sli4_eq_get(eq))) {
14340 lpfc_sli4_fof_handle_eqe(phba, eqe);
14341 if (!(++ecount % eq->entry_repost))
14343 eq->EQ_processed++;
14346 /* Track the max number of EQEs processed in 1 intr */
14347 if (ecount > eq->EQ_max_eqe)
14348 eq->EQ_max_eqe = ecount;
14351 if (unlikely(ecount == 0)) {
14354 if (phba->intr_type == MSIX)
14355 /* An MSI-X vector is not shared, so an empty EQ is unexpected */
14356 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14357 "9145 MSI-X interrupt with no EQE\n");
14359 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14360 "9146 ISR interrupt with no EQE\n");
14361 /* Non MSI-X interrupts may share the line, so an empty EQ can occur */
14365 /* Always clear and re-arm the fast-path EQ */
14366 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
14367 return IRQ_HANDLED;
14371 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14372 * @irq: Interrupt number.
14373 * @dev_id: The device context pointer.
14375 * This function is directly called from the PCI layer as an interrupt
14376 * service routine when device with SLI-4 interface spec is enabled with
14377 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14378 * ring event in the HBA. However, when the device is enabled with either
14379 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14380 * device-level interrupt handler. When the PCI slot is in error recovery
14381 * or the HBA is undergoing initialization, the interrupt handler will not
14382 * process the interrupt. SCSI FCP fast-path ring events are handled in
14383 * the interrupt context. This function is called without any lock held.
14384 * It gets the hbalock to access and update SLI data structures. Note that
14385 * the FCP EQ and FCP CQ are mapped one-to-one, so the FCP EQ index is
14386 * equal to the FCP CQ index.
14388 * The link attention and ELS ring attention events are handled
14389 * by the worker thread. The interrupt handler signals the worker thread
14390 * and returns for these events. This function is called without any lock
14391 * held. It gets the hbalock to access and update SLI data structures.
14393 * This function returns IRQ_HANDLED when interrupt is handled else it
14394 * returns IRQ_NONE.
14397 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14399 struct lpfc_hba *phba;
14400 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14401 struct lpfc_queue *fpeq;
14402 struct lpfc_eqe *eqe;
14403 unsigned long iflag;
14407 /* Get the driver's phba structure from the dev_id */
14408 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14409 phba = hba_eq_hdl->phba;
14410 hba_eqidx = hba_eq_hdl->idx;
14412 if (unlikely(!phba))
14414 if (unlikely(!phba->sli4_hba.hba_eq))
14417 /* Get to the EQ struct associated with this vector */
14418 fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
14419 if (unlikely(!fpeq))
14422 if (lpfc_fcp_look_ahead) {
14423 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
14424 phba->sli4_hba.sli4_eq_clr_intr(fpeq);
14426 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14431 /* Check device state for handling interrupt */
14432 if (unlikely(lpfc_intr_state_check(phba))) {
14433 /* Check again for link_state with lock held */
14434 spin_lock_irqsave(&phba->hbalock, iflag);
14435 if (phba->link_state < LPFC_LINK_DOWN)
14436 /* Flush, clear interrupt, and rearm the EQ */
14437 lpfc_sli4_eq_flush(phba, fpeq);
14438 spin_unlock_irqrestore(&phba->hbalock, iflag);
14439 if (lpfc_fcp_look_ahead)
14440 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14445 * Process all the events on the FCP fast-path EQ
14447 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
14448 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
14449 if (!(++ecount % fpeq->entry_repost))
14451 fpeq->EQ_processed++;
14454 /* Track the max number of EQEs processed in 1 intr */
14455 if (ecount > fpeq->EQ_max_eqe)
14456 fpeq->EQ_max_eqe = ecount;
14458 /* Always clear and re-arm the fast-path EQ */
14459 phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
14461 if (unlikely(ecount == 0)) {
14462 fpeq->EQ_no_entry++;
14464 if (lpfc_fcp_look_ahead) {
14465 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14469 if (phba->intr_type == MSIX)
14470 /* An MSI-X vector is not shared, so an empty EQ is unexpected */
14471 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14472 "0358 MSI-X interrupt with no EQE\n");
14474 /* Non MSI-X interrupts may share the line, so an empty EQ can occur */
14478 if (lpfc_fcp_look_ahead)
14479 atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14481 return IRQ_HANDLED;
14482 } /* lpfc_sli4_hba_intr_handler */
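/*
 * Editor's note: when lpfc_fcp_look_ahead is set, the handler above uses
 * an atomic counter as a gate so only one context at a time polls the EQ
 * with its interrupt masked. A minimal sketch of that gate
 * (my_eq_in_use and the callbacks are hypothetical stand-ins):
 */
#if 0	/* illustrative sketch only */
static irqreturn_t my_gated_isr(atomic_t *eq_in_use)
{
	/* 1 -> 0 transition: we own the EQ, mask its interrupt and poll. */
	if (atomic_dec_and_test(eq_in_use)) {
		my_eq_clr_intr();
		my_poll_eq();
		my_eq_rearm();
	}
	/* Drop ownership (0 -> 1) so the next interrupt can take the gate. */
	atomic_inc(eq_in_use);
	return IRQ_HANDLED;
}
#endif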
14485 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14486 * @irq: Interrupt number.
14487 * @dev_id: The device context pointer.
14489 * This function is the device-level interrupt handler to device with SLI-4
14490 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14491 * interrupt mode is enabled and there is an event in the HBA which requires
14492 * driver attention. This function invokes the slow-path interrupt attention
14493 * handling function and fast-path interrupt attention handling function in
14494 * turn to process the relevant HBA attention events. This function is called
14495 * without any lock held. It gets the hbalock to access and update SLI data
14498 * This function returns IRQ_HANDLED when interrupt is handled, else it
14499 * returns IRQ_NONE.
14502 lpfc_sli4_intr_handler(int irq, void *dev_id)
14504 struct lpfc_hba *phba;
14505 irqreturn_t hba_irq_rc;
14506 bool hba_handled = false;
14509 /* Get the driver's phba structure from the dev_id */
14510 phba = (struct lpfc_hba *)dev_id;
14512 if (unlikely(!phba))
14516 * Invoke fast-path host attention interrupt handling as appropriate.
14518 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
14519 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14520 &phba->sli4_hba.hba_eq_hdl[qidx]);
14521 if (hba_irq_rc == IRQ_HANDLED)
14522 hba_handled |= true;
14525 if (phba->cfg_fof) {
14526 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
14527 &phba->sli4_hba.hba_eq_hdl[qidx]);
14528 if (hba_irq_rc == IRQ_HANDLED)
14529 hba_handled |= true;
14532 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14533 } /* lpfc_sli4_intr_handler */
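/*
 * Editor's note: this device-level handler is the one used when the port
 * runs with MSI or pin-based INTx (MSI-X instead registers the per-vector
 * fast-path handler). The actual registration lives elsewhere in the
 * driver; a generic sketch of such a hookup, with my_setup_single_irq
 * hypothetical:
 */
#if 0	/* illustrative sketch only */
static int my_setup_single_irq(struct pci_dev *pdev, struct lpfc_hba *phba)
{
	int rc;

	/* Fall back from MSI to legacy INTx if MSI is unavailable. */
	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (rc < 0)
		return rc;

	/* dev_id is the phba, which is what lpfc_sli4_intr_handler expects. */
	return request_irq(pci_irq_vector(pdev, 0), lpfc_sli4_intr_handler,
			   IRQF_SHARED, "my-lpfc", phba);
}
#endif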
14536 * lpfc_sli4_queue_free - free a queue structure and associated memory
14537 * @queue: The queue structure to free.
14539 * This function frees a queue structure and the DMAable memory used for
14540 * the host resident queue. This function must be called after destroying the
14541 * queue on the HBA.
14544 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14546 struct lpfc_dmabuf *dmabuf;
14551 while (!list_empty(&queue->page_list)) {
14552 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14554 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14555 dmabuf->virt, dmabuf->phys);
14559 lpfc_free_rq_buffer(queue->phba, queue);
14560 kfree(queue->rqbp);
14563 if (!list_empty(&queue->wq_list))
14564 list_del(&queue->wq_list);
14571 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14572 * @phba: The HBA that this queue is being created on.
14573 * @page_size: The size of a queue page
14574 * @entry_size: The size of each queue entry for this queue.
14575 * @entry_count: The number of entries that this queue will handle.
14577 * This function allocates a queue structure and the DMAable memory used for
14578 * the host resident queue. This function must be called before creating the
14579 * queue on the HBA.
14581 struct lpfc_queue *
14582 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14583 uint32_t entry_size, uint32_t entry_count)
14585 struct lpfc_queue *queue;
14586 struct lpfc_dmabuf *dmabuf;
14587 int x, total_qe_count;
14589 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14591 if (!phba->sli4_hba.pc_sli4_params.supported)
14592 hw_page_size = page_size;
14594 queue = kzalloc(sizeof(struct lpfc_queue) +
14595 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
14598 queue->page_count = (ALIGN(entry_size * entry_count,
14599 hw_page_size))/hw_page_size;
14601 /* If needed, adjust page count to match the max the adapter supports */
14602 if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
14603 (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
14604 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
14606 INIT_LIST_HEAD(&queue->list);
14607 INIT_LIST_HEAD(&queue->wq_list);
14608 INIT_LIST_HEAD(&queue->wqfull_list);
14609 INIT_LIST_HEAD(&queue->page_list);
14610 INIT_LIST_HEAD(&queue->child_list);
14612 /* Set queue parameters now. If the system cannot provide memory
14613 * resources, the free routine needs to know what was allocated.
14615 queue->entry_size = entry_size;
14616 queue->entry_count = entry_count;
14617 queue->page_size = hw_page_size;
14618 queue->phba = phba;
14620 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
14621 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14624 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
14625 hw_page_size, &dmabuf->phys,
14627 if (!dmabuf->virt) {
14631 dmabuf->buffer_tag = x;
14632 list_add_tail(&dmabuf->list, &queue->page_list);
14633 /* initialize queue's entry array */
14634 dma_pointer = dmabuf->virt;
14635 for (; total_qe_count < entry_count &&
14636 dma_pointer < (hw_page_size + dmabuf->virt);
14637 total_qe_count++, dma_pointer += entry_size) {
14638 queue->qe[total_qe_count].address = dma_pointer;
14641 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14642 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14644 /* entry_repost will be set during q creation */
14648 lpfc_sli4_queue_free(queue);
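/*
 * Editor's note: the page count above is just the entry array size
 * rounded up to whole hardware pages:
 * ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size.
 * A worked sketch (my_q_pages is hypothetical):
 */
#if 0	/* illustrative sketch only */
static u32 my_q_pages(u32 entry_size, u32 entry_count, u32 page_size)
{
	return ALIGN(entry_size * entry_count, page_size) / page_size;
}
/*
 * e.g. my_q_pages(64, 256, 4096) == 4   (16 KB of 64-byte WQEs)
 *      my_q_pages(16, 1024, 4096) == 4  (16 KB of 16-byte CQEs)
 */
#endif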
14653 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14654 * @phba: HBA structure that indicates port to create a queue on.
14655 * @pci_barset: PCI BAR set flag.
14657 * This function returns the host memory address to which the specified
14658 * PCI BAR set has been iomapped, if already mapped. The returned host
14659 * memory address can be NULL.
14661 static void __iomem *
14662 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14667 switch (pci_barset) {
14668 case WQ_PCI_BAR_0_AND_1:
14669 return phba->pci_bar0_memmap_p;
14670 case WQ_PCI_BAR_2_AND_3:
14671 return phba->pci_bar2_memmap_p;
14672 case WQ_PCI_BAR_4_AND_5:
14673 return phba->pci_bar4_memmap_p;
14681 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
14682 * @phba: HBA structure that indicates port to create a queue on.
14683 * @startq: The starting FCP EQ to modify
14685 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
14686 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ IDs to be
14687 * updated in one mailbox command.
14689 * The @phba struct is used to send the mailbox command to the HBA. The
14690 * @startq argument gives the first FCP EQ to change.
14691 * This function is synchronous and waits for the mailbox
14692 * command to finish before continuing.
14694 * On success this function will return a zero. If unable to allocate enough
14695 * memory this function will return -ENOMEM. If the mailbox command
14696 * fails this function will return -ENXIO.
14699 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14700 uint32_t numq, uint32_t imax)
14702 struct lpfc_mbx_modify_eq_delay *eq_delay;
14703 LPFC_MBOXQ_t *mbox;
14704 struct lpfc_queue *eq;
14705 int cnt, rc, length, status = 0;
14706 uint32_t shdr_status, shdr_add_status;
14707 uint32_t result, val;
14709 union lpfc_sli4_cfg_shdr *shdr;
14712 if (startq >= phba->io_channel_irqs)
14715 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14718 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14719 sizeof(struct lpfc_sli4_cfg_mhdr));
14720 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14721 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14722 length, LPFC_SLI4_MBX_EMBED);
14723 eq_delay = &mbox->u.mqe.un.eq_delay;
14725 /* Calculate delay multiplier from maximum interrupts per second */
14726 result = imax / phba->io_channel_irqs;
14727 if (result > LPFC_DMULT_CONST || result == 0)
14730 dmult = LPFC_DMULT_CONST/result - 1;
14731 if (dmult > LPFC_DMULT_MAX)
14732 dmult = LPFC_DMULT_MAX;
14735 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
14736 eq = phba->sli4_hba.hba_eq[qidx];
14740 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14741 eq_delay->u.request.eq[cnt].phase = 0;
14742 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14745 /* q_mode is only used for auto_imax */
14746 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14747 /* Use EQ Delay Register method for q_mode */
14749 /* Convert for EQ Delay register */
14750 val = phba->cfg_fcp_imax;
14752 /* First, interrupts per sec per EQ */
14753 val = phba->cfg_fcp_imax /
14754 phba->io_channel_irqs;
14756 /* us delay between each interrupt */
14757 val = LPFC_SEC_TO_USEC / val;
14767 eq_delay->u.request.num_eq = cnt;
14769 mbox->vport = phba->pport;
14770 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14771 mbox->ctx_buf = NULL;
14772 mbox->ctx_ndlp = NULL;
14773 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14774 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14775 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14776 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14777 if (shdr_status || shdr_add_status || rc) {
14778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14779 "2512 MODIFY_EQ_DELAY mailbox failed with "
14780 "status x%x add_status x%x, mbx status x%x\n",
14781 shdr_status, shdr_add_status, rc);
14784 mempool_free(mbox, phba->mbox_mem_pool);
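/*
 * Editor's note: the delay multiplier above is derived from the target
 * interrupt rate: per-EQ rate = imax / io_channel_irqs, then
 * dmult = LPFC_DMULT_CONST / rate - 1, clamped to LPFC_DMULT_MAX.
 * A sketch under those assumptions (names and parameters are
 * placeholders; the real constants live in the lpfc headers):
 */
#if 0	/* illustrative sketch only */
static u32 my_eq_dmult(u32 imax, u32 nirqs, u32 dmult_const, u32 dmult_max)
{
	u32 rate = nirqs ? imax / nirqs : 0;
	u32 dmult;

	if (rate == 0 || rate > dmult_const)
		return 0;		/* no coalescing */
	dmult = dmult_const / rate - 1;
	return min(dmult, dmult_max);
}
#endif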
14789 * lpfc_eq_create - Create an Event Queue on the HBA
14790 * @phba: HBA structure that indicates port to create a queue on.
14791 * @eq: The queue structure to use to create the event queue.
14792 * @imax: The maximum interrupt per second limit.
14794 * This function creates an event queue, as detailed in @eq, on a port,
14795 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14797 * The @phba struct is used to send mailbox command to HBA. The @eq struct
14798 * is used to get the entry count and entry size that are necessary to
14799 * determine the number of pages to allocate and use for this queue. This
14800 * function will send the EQ_CREATE mailbox command to the HBA to setup the
14801 * event queue. This function is synchronous and waits for the mailbox
14802 * command to finish before continuing.
14804 * On success this function will return a zero. If unable to allocate enough
14805 * memory this function will return -ENOMEM. If the queue create mailbox command
14806 * fails this function will return -ENXIO.
14809 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14811 struct lpfc_mbx_eq_create *eq_create;
14812 LPFC_MBOXQ_t *mbox;
14813 int rc, length, status = 0;
14814 struct lpfc_dmabuf *dmabuf;
14815 uint32_t shdr_status, shdr_add_status;
14816 union lpfc_sli4_cfg_shdr *shdr;
14818 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14820 /* sanity check on queue memory */
14823 if (!phba->sli4_hba.pc_sli4_params.supported)
14824 hw_page_size = SLI4_PAGE_SIZE;
14826 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14829 length = (sizeof(struct lpfc_mbx_eq_create) -
14830 sizeof(struct lpfc_sli4_cfg_mhdr));
14831 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14832 LPFC_MBOX_OPCODE_EQ_CREATE,
14833 length, LPFC_SLI4_MBX_EMBED);
14834 eq_create = &mbox->u.mqe.un.eq_create;
14835 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14836 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14838 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14840 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14842 /* Use version 2 of CREATE_EQ if eqav is set */
14843 if (phba->sli4_hba.pc_sli4_params.eqav) {
14844 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14845 LPFC_Q_CREATE_VERSION_2);
14846 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14847 phba->sli4_hba.pc_sli4_params.eqav);
14850 /* don't setup delay multiplier using EQ_CREATE */
14852 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14854 switch (eq->entry_count) {
14856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14857 "0360 Unsupported EQ count. (%d)\n",
14859 if (eq->entry_count < 256)
14861 /* otherwise default to smallest count (drop through) */
14863 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14867 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14871 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14875 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14879 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14883 list_for_each_entry(dmabuf, &eq->page_list, list) {
14884 memset(dmabuf->virt, 0, hw_page_size);
14885 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14886 putPaddrLow(dmabuf->phys);
14887 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14888 putPaddrHigh(dmabuf->phys);
14890 mbox->vport = phba->pport;
14891 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14892 mbox->ctx_buf = NULL;
14893 mbox->ctx_ndlp = NULL;
14894 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14895 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14896 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14897 if (shdr_status || shdr_add_status || rc) {
14898 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14899 "2500 EQ_CREATE mailbox failed with "
14900 "status x%x add_status x%x, mbx status x%x\n",
14901 shdr_status, shdr_add_status, rc);
14904 eq->type = LPFC_EQ;
14905 eq->subtype = LPFC_NONE;
14906 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14907 if (eq->queue_id == 0xFFFF)
14909 eq->host_index = 0;
14911 eq->entry_repost = LPFC_EQ_REPOST;
14913 mempool_free(mbox, phba->mbox_mem_pool);
14918 * lpfc_cq_create - Create a Completion Queue on the HBA
14919 * @phba: HBA structure that indicates port to create a queue on.
14920 * @cq: The queue structure to use to create the completion queue.
14921 * @eq: The event queue to bind this completion queue to.
14923 * This function creates a completion queue, as detailed in @cq, on a port,
14924 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14926 * The @phba struct is used to send the mailbox command to the HBA. The @cq
14927 * struct is used to get the entry count and entry size that are necessary to
14928 * determine the number of pages to allocate and use for this queue. The @eq
14929 * is used to indicate which event queue to bind this completion queue to. This
14930 * function will send the CQ_CREATE mailbox command to the HBA to set up the
14931 * completion queue. This function is synchronous and waits for the mailbox
14932 * command to finish before continuing.
14934 * On success this function will return a zero. If unable to allocate enough
14935 * memory this function will return -ENOMEM. If the queue create mailbox command
14936 * fails this function will return -ENXIO.
14939 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14940 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14942 struct lpfc_mbx_cq_create *cq_create;
14943 struct lpfc_dmabuf *dmabuf;
14944 LPFC_MBOXQ_t *mbox;
14945 int rc, length, status = 0;
14946 uint32_t shdr_status, shdr_add_status;
14947 union lpfc_sli4_cfg_shdr *shdr;
14949 /* sanity check on queue memory */
14953 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14956 length = (sizeof(struct lpfc_mbx_cq_create) -
14957 sizeof(struct lpfc_sli4_cfg_mhdr));
14958 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14959 LPFC_MBOX_OPCODE_CQ_CREATE,
14960 length, LPFC_SLI4_MBX_EMBED);
14961 cq_create = &mbox->u.mqe.un.cq_create;
14962 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14963 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14965 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14966 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14967 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14968 phba->sli4_hba.pc_sli4_params.cqv);
14969 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14970 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14971 (cq->page_size / SLI4_PAGE_SIZE));
14972 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14974 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14975 phba->sli4_hba.pc_sli4_params.cqav);
14977 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14980 switch (cq->entry_count) {
14983 if (phba->sli4_hba.pc_sli4_params.cqv ==
14984 LPFC_Q_CREATE_VERSION_2) {
14985 cq_create->u.request.context.lpfc_cq_context_count =
14987 bf_set(lpfc_cq_context_count,
14988 &cq_create->u.request.context,
14989 LPFC_CQ_CNT_WORD7);
14994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14995 "0361 Unsupported CQ count: "
14996 "entry cnt %d sz %d pg cnt %d\n",
14997 cq->entry_count, cq->entry_size,
14999 if (cq->entry_count < 256) {
15003 /* otherwise default to smallest count (drop through) */
15005 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15009 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15013 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15017 list_for_each_entry(dmabuf, &cq->page_list, list) {
15018 memset(dmabuf->virt, 0, cq->page_size);
15019 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15020 putPaddrLow(dmabuf->phys);
15021 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15022 putPaddrHigh(dmabuf->phys);
15024 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15026 /* The IOCTL status is embedded in the mailbox subheader. */
15027 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15028 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15029 if (shdr_status || shdr_add_status || rc) {
15030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15031 "2501 CQ_CREATE mailbox failed with "
15032 "status x%x add_status x%x, mbx status x%x\n",
15033 shdr_status, shdr_add_status, rc);
15037 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15038 if (cq->queue_id == 0xFFFF) {
15042 /* link the cq onto the parent eq child list */
15043 list_add_tail(&cq->list, &eq->child_list);
15044 /* Set up completion queue's type and subtype */
15046 cq->subtype = subtype;
15047 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15048 cq->assoc_qid = eq->queue_id;
15049 cq->host_index = 0;
15051 cq->entry_repost = LPFC_CQ_REPOST;
15054 mempool_free(mbox, phba->mbox_mem_pool);
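/*
 * Editor's note: every queue-create path in this file ends with the same
 * check: the IOCTL status embedded in the mailbox sub-header plus the
 * mailbox return code. That common tail, as a sketch (my_mbx_status is
 * hypothetical; bf_get and the shdr layout are the ones used above):
 */
#if 0	/* illustrative sketch only */
static int my_mbx_status(union lpfc_sli4_cfg_shdr *shdr, int rc)
{
	uint32_t status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	uint32_t add = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	/* Any non-zero status means the create did not take effect. */
	return (status || add || rc) ? -ENXIO : 0;
}
#endif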
15059 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15060 * @phba: HBA structure that indicates port to create a queue on.
15061 * @cqp: The queue structure array to use to create the completion queues.
15062 * @eqp: The event queue array to bind these completion queues to.
15064 * This function creates a set of completion queues to support MRQ,
15065 * as detailed in @cqp, on a port,
15066 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15068 * The @phba struct is used to send the mailbox command to the HBA. The @cqp
15069 * array is used to get the entry count and entry size that are necessary to
15070 * determine the number of pages to allocate and use for these queues. The @eqp
15071 * array indicates which event queue to bind each completion queue to. This
15072 * function will send the CREATE_CQ_SET mailbox command to the HBA to set up the
15073 * completion queues. This function is synchronous and waits for the mailbox
15074 * command to finish before continuing.
15076 * On success this function will return a zero. If unable to allocate enough
15077 * memory this function will return -ENOMEM. If the queue create mailbox command
15078 * fails this function will return -ENXIO.
15081 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15082 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
15084 struct lpfc_queue *cq;
15085 struct lpfc_queue *eq;
15086 struct lpfc_mbx_cq_create_set *cq_set;
15087 struct lpfc_dmabuf *dmabuf;
15088 LPFC_MBOXQ_t *mbox;
15089 int rc, length, alloclen, status = 0;
15090 int cnt, idx, numcq, page_idx = 0;
15091 uint32_t shdr_status, shdr_add_status;
15092 union lpfc_sli4_cfg_shdr *shdr;
15093 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15095 /* sanity check on queue memory */
15096 numcq = phba->cfg_nvmet_mrq;
15097 if (!cqp || !eqp || !numcq)
15100 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15104 length = sizeof(struct lpfc_mbx_cq_create_set);
15105 length += ((numcq * cqp[0]->page_count) *
15106 sizeof(struct dma_address));
15107 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15108 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15109 LPFC_SLI4_MBX_NEMBED);
15110 if (alloclen < length) {
15111 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15112 "3098 Allocated DMA memory size (%d) is "
15113 "less than the requested DMA memory size "
15114 "(%d)\n", alloclen, length);
15118 cq_set = mbox->sge_array->addr[0];
15119 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15120 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15122 for (idx = 0; idx < numcq; idx++) {
15129 if (!phba->sli4_hba.pc_sli4_params.supported)
15130 hw_page_size = cq->page_size;
15134 bf_set(lpfc_mbx_cq_create_set_page_size,
15135 &cq_set->u.request,
15136 (hw_page_size / SLI4_PAGE_SIZE));
15137 bf_set(lpfc_mbx_cq_create_set_num_pages,
15138 &cq_set->u.request, cq->page_count);
15139 bf_set(lpfc_mbx_cq_create_set_evt,
15140 &cq_set->u.request, 1);
15141 bf_set(lpfc_mbx_cq_create_set_valid,
15142 &cq_set->u.request, 1);
15143 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15144 &cq_set->u.request, 0);
15145 bf_set(lpfc_mbx_cq_create_set_num_cq,
15146 &cq_set->u.request, numcq);
15147 bf_set(lpfc_mbx_cq_create_set_autovalid,
15148 &cq_set->u.request,
15149 phba->sli4_hba.pc_sli4_params.cqav);
15150 switch (cq->entry_count) {
15153 if (phba->sli4_hba.pc_sli4_params.cqv ==
15154 LPFC_Q_CREATE_VERSION_2) {
15155 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15156 &cq_set->u.request,
15158 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15159 &cq_set->u.request,
15160 LPFC_CQ_CNT_WORD7);
15165 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15166 "3118 Bad CQ count. (%d)\n",
15168 if (cq->entry_count < 256) {
15172 /* otherwise default to smallest (drop thru) */
15174 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15175 &cq_set->u.request, LPFC_CQ_CNT_256);
15178 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15179 &cq_set->u.request, LPFC_CQ_CNT_512);
15182 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15183 &cq_set->u.request, LPFC_CQ_CNT_1024);
15186 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15187 &cq_set->u.request, eq->queue_id);
15190 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15191 &cq_set->u.request, eq->queue_id);
15194 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15195 &cq_set->u.request, eq->queue_id);
15198 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15199 &cq_set->u.request, eq->queue_id);
15202 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15203 &cq_set->u.request, eq->queue_id);
15206 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15207 &cq_set->u.request, eq->queue_id);
15210 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15211 &cq_set->u.request, eq->queue_id);
15214 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15215 &cq_set->u.request, eq->queue_id);
15218 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15219 &cq_set->u.request, eq->queue_id);
15222 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15223 &cq_set->u.request, eq->queue_id);
15226 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15227 &cq_set->u.request, eq->queue_id);
15230 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15231 &cq_set->u.request, eq->queue_id);
15234 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15235 &cq_set->u.request, eq->queue_id);
15238 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15239 &cq_set->u.request, eq->queue_id);
15242 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15243 &cq_set->u.request, eq->queue_id);
15246 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15247 &cq_set->u.request, eq->queue_id);
15251 /* link the cq onto the parent eq child list */
15252 list_add_tail(&cq->list, &eq->child_list);
15253 /* Set up completion queue's type and subtype */
15255 cq->subtype = subtype;
15256 cq->assoc_qid = eq->queue_id;
15257 cq->host_index = 0;
15259 cq->entry_repost = LPFC_CQ_REPOST;
15263 list_for_each_entry(dmabuf, &cq->page_list, list) {
15264 memset(dmabuf->virt, 0, hw_page_size);
15265 cnt = page_idx + dmabuf->buffer_tag;
15266 cq_set->u.request.page[cnt].addr_lo =
15267 putPaddrLow(dmabuf->phys);
15268 cq_set->u.request.page[cnt].addr_hi =
15269 putPaddrHigh(dmabuf->phys);
15275 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15277 /* The IOCTL status is embedded in the mailbox subheader. */
15278 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15279 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15280 if (shdr_status || shdr_add_status || rc) {
15281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15282 "3119 CQ_CREATE_SET mailbox failed with "
15283 "status x%x add_status x%x, mbx status x%x\n",
15284 shdr_status, shdr_add_status, rc);
15288 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15289 if (rc == 0xFFFF) {
15294 for (idx = 0; idx < numcq; idx++) {
15296 cq->queue_id = rc + idx;
15300 lpfc_sli4_mbox_cmd_free(phba, mbox);
15305 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15306 * @phba: HBA structure that indicates port to create a queue on.
15307 * @mq: The queue structure to use to create the mailbox queue.
15308 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15309 * @cq: The completion queue to associate with this mq.
15311 * This function provides fallback (fb) functionality when the
15312 * mq_create_ext fails on older FW generations. Its purpose is identical
15313 * to mq_create_ext otherwise.
15315 * This routine cannot fail as all attributes were previously accessed and
15316 * initialized in mq_create_ext.
15319 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15320 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15322 struct lpfc_mbx_mq_create *mq_create;
15323 struct lpfc_dmabuf *dmabuf;
15326 length = (sizeof(struct lpfc_mbx_mq_create) -
15327 sizeof(struct lpfc_sli4_cfg_mhdr));
15328 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15329 LPFC_MBOX_OPCODE_MQ_CREATE,
15330 length, LPFC_SLI4_MBX_EMBED);
15331 mq_create = &mbox->u.mqe.un.mq_create;
15332 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15334 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15336 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15337 switch (mq->entry_count) {
15339 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15340 LPFC_MQ_RING_SIZE_16);
15343 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15344 LPFC_MQ_RING_SIZE_32);
15347 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15348 LPFC_MQ_RING_SIZE_64);
15351 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15352 LPFC_MQ_RING_SIZE_128);
15355 list_for_each_entry(dmabuf, &mq->page_list, list) {
15356 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15357 putPaddrLow(dmabuf->phys);
15358 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15359 putPaddrHigh(dmabuf->phys);
15364 * lpfc_mq_create - Create a mailbox Queue on the HBA
15365 * @phba: HBA structure that indicates port to create a queue on.
15366 * @mq: The queue structure to use to create the mailbox queue.
15367 * @cq: The completion queue to associate with this mq.
15368 * @subtype: The queue's subtype.
15370 * This function creates a mailbox queue, as detailed in @mq, on a port,
15371 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15373 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15374 * is used to get the entry count and entry size that are necessary to
15375 * determine the number of pages to allocate and use for this queue. This
15376 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15377 * mailbox queue. This function is synchronous and will wait for the mailbox
15378 * command to finish before continuing.
15380 * On success this function will return a zero. If unable to allocate enough
15381 * memory this function will return -ENOMEM. If the queue create mailbox command
15382 * fails this function will return -ENXIO.
15385 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15386 struct lpfc_queue *cq, uint32_t subtype)
15388 struct lpfc_mbx_mq_create *mq_create;
15389 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15390 struct lpfc_dmabuf *dmabuf;
15391 LPFC_MBOXQ_t *mbox;
15392 int rc, length, status = 0;
15393 uint32_t shdr_status, shdr_add_status;
15394 union lpfc_sli4_cfg_shdr *shdr;
15395 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15397 /* sanity check on queue memory */
15400 if (!phba->sli4_hba.pc_sli4_params.supported)
15401 hw_page_size = SLI4_PAGE_SIZE;
15403 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15406 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15407 sizeof(struct lpfc_sli4_cfg_mhdr));
15408 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15409 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15410 length, LPFC_SLI4_MBX_EMBED);
15412 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15413 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15414 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15415 &mq_create_ext->u.request, mq->page_count);
15416 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15417 &mq_create_ext->u.request, 1);
15418 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15419 &mq_create_ext->u.request, 1);
15420 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15421 &mq_create_ext->u.request, 1);
15422 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15423 &mq_create_ext->u.request, 1);
15424 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15425 &mq_create_ext->u.request, 1);
15426 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15427 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15428 phba->sli4_hba.pc_sli4_params.mqv);
15429 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15430 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15433 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15435 switch (mq->entry_count) {
15437 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15438 "0362 Unsupported MQ count. (%d)\n",
15440 if (mq->entry_count < 16) {
15444 /* otherwise default to smallest count (fall through) */
15446 bf_set(lpfc_mq_context_ring_size,
15447 &mq_create_ext->u.request.context,
15448 LPFC_MQ_RING_SIZE_16);
15451 bf_set(lpfc_mq_context_ring_size,
15452 &mq_create_ext->u.request.context,
15453 LPFC_MQ_RING_SIZE_32);
15456 bf_set(lpfc_mq_context_ring_size,
15457 &mq_create_ext->u.request.context,
15458 LPFC_MQ_RING_SIZE_64);
15461 bf_set(lpfc_mq_context_ring_size,
15462 &mq_create_ext->u.request.context,
15463 LPFC_MQ_RING_SIZE_128);
15466 list_for_each_entry(dmabuf, &mq->page_list, list) {
15467 memset(dmabuf->virt, 0, hw_page_size);
15468 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15469 putPaddrLow(dmabuf->phys);
15470 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15471 putPaddrHigh(dmabuf->phys);
15473 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15474 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15475 &mq_create_ext->u.response);
15476 if (rc != MBX_SUCCESS) {
15477 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15478 "2795 MQ_CREATE_EXT failed with "
15479 "status x%x. Failback to MQ_CREATE.\n",
15481 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15482 mq_create = &mbox->u.mqe.un.mq_create;
15483 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15484 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15485 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15486 &mq_create->u.response);
15489 /* The IOCTL status is embedded in the mailbox subheader. */
15490 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15491 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15492 if (shdr_status || shdr_add_status || rc) {
15493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15494 "2502 MQ_CREATE mailbox failed with "
15495 "status x%x add_status x%x, mbx status x%x\n",
15496 shdr_status, shdr_add_status, rc);
15500 if (mq->queue_id == 0xFFFF) {
15504 mq->type = LPFC_MQ;
15505 mq->assoc_qid = cq->queue_id;
15506 mq->subtype = subtype;
15507 mq->host_index = 0;
15509 mq->entry_repost = LPFC_MQ_REPOST;
15511 /* link the mq onto the parent cq child list */
15512 list_add_tail(&mq->list, &cq->child_list);
15514 mempool_free(mbox, phba->mbox_mem_pool);
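/*
 * Usage sketch (kept under #if 0, not built): how a caller might bind
 * the mailbox queue to its completion queue. The mbx_wq/mbx_cq fields
 * are assumed from the driver's sli4_hba queue bookkeeping.
 */
#if 0
static int example_mq_setup(struct lpfc_hba *phba)
{
	/* Returns 0, -ENOMEM, or -ENXIO as documented above. */
	return lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			      phba->sli4_hba.mbx_cq, LPFC_MBOX);
}
#endif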
15519 * lpfc_wq_create - Create a Work Queue on the HBA
15520 * @phba: HBA structure that indicates port to create a queue on.
15521 * @wq: The queue structure to use to create the work queue.
15522 * @cq: The completion queue to bind this work queue to.
15523 * @subtype: The subtype of the work queue indicating its functionality.
15525 * This function creates a work queue, as detailed in @wq, on a port, described
15526 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15528 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15529 * is used to get the entry count and entry size that are necessary to
15530 * determine the number of pages to allocate and use for this queue. The @cq
15531 * is used to indicate which completion queue to bind this work queue to. This
15532 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15533 * work queue. This function is synchronous and will wait for the mailbox
15534 * command to finish before continuing.
15536 * On success this function will return a zero. If unable to allocate enough
15537 * memory this function will return -ENOMEM. If the queue create mailbox command
15538 * fails this function will return -ENXIO.
15541 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15542 struct lpfc_queue *cq, uint32_t subtype)
15544 struct lpfc_mbx_wq_create *wq_create;
15545 struct lpfc_dmabuf *dmabuf;
15546 LPFC_MBOXQ_t *mbox;
15547 int rc, length, status = 0;
15548 uint32_t shdr_status, shdr_add_status;
15549 union lpfc_sli4_cfg_shdr *shdr;
15550 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15551 struct dma_address *page;
15552 void __iomem *bar_memmap_p;
15553 uint32_t db_offset;
15554 uint16_t pci_barset;
15555 uint8_t dpp_barset;
15556 uint32_t dpp_offset;
15557 unsigned long pg_addr;
15558 uint8_t wq_create_version;
15560 /* sanity check on queue memory */
15563 if (!phba->sli4_hba.pc_sli4_params.supported)
15564 hw_page_size = wq->page_size;
15566 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15569 length = (sizeof(struct lpfc_mbx_wq_create) -
15570 sizeof(struct lpfc_sli4_cfg_mhdr));
15571 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15572 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15573 length, LPFC_SLI4_MBX_EMBED);
15574 wq_create = &mbox->u.mqe.un.wq_create;
15575 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15576 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15578 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15581 /* wqv is the earliest version supported, NOT the latest */
15582 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15583 phba->sli4_hba.pc_sli4_params.wqv);
15585 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15586 (wq->page_size > SLI4_PAGE_SIZE))
15587 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15589 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15597 switch (wq_create_version) {
15598 case LPFC_Q_CREATE_VERSION_1:
15599 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15601 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15602 LPFC_Q_CREATE_VERSION_1);
15604 switch (wq->entry_size) {
15607 bf_set(lpfc_mbx_wq_create_wqe_size,
15608 &wq_create->u.request_1,
15609 LPFC_WQ_WQE_SIZE_64);
15612 bf_set(lpfc_mbx_wq_create_wqe_size,
15613 &wq_create->u.request_1,
15614 LPFC_WQ_WQE_SIZE_128);
15617 /* Request DPP by default */
15618 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15619 bf_set(lpfc_mbx_wq_create_page_size,
15620 &wq_create->u.request_1,
15621 (wq->page_size / SLI4_PAGE_SIZE));
15622 page = wq_create->u.request_1.page;
15625 page = wq_create->u.request.page;
15629 list_for_each_entry(dmabuf, &wq->page_list, list) {
15630 memset(dmabuf->virt, 0, hw_page_size);
15631 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15632 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15635 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15636 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15638 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15639 /* The IOCTL status is embedded in the mailbox subheader. */
15640 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15641 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15642 if (shdr_status || shdr_add_status || rc) {
15643 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15644 "2503 WQ_CREATE mailbox failed with "
15645 "status x%x add_status x%x, mbx status x%x\n",
15646 shdr_status, shdr_add_status, rc);
15651 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15652 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15653 &wq_create->u.response);
15655 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15656 &wq_create->u.response_1);
15658 if (wq->queue_id == 0xFFFF) {
15663 wq->db_format = LPFC_DB_LIST_FORMAT;
15664 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15665 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15666 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15667 &wq_create->u.response);
15668 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15669 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15671 "3265 WQ[%d] doorbell format "
15672 "not supported: x%x\n",
15673 wq->queue_id, wq->db_format);
15677 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15678 &wq_create->u.response);
15679 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15681 if (!bar_memmap_p) {
15682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15683 "3263 WQ[%d] failed to memmap "
15684 "pci barset:x%x\n",
15685 wq->queue_id, pci_barset);
15689 db_offset = wq_create->u.response.doorbell_offset;
15690 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15691 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15692 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15693 "3252 WQ[%d] doorbell offset "
15694 "not supported: x%x\n",
15695 wq->queue_id, db_offset);
15699 wq->db_regaddr = bar_memmap_p + db_offset;
15700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15701 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15702 "format:x%x\n", wq->queue_id,
15703 pci_barset, db_offset, wq->db_format);
15705 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15707 /* Check if DPP was honored by the firmware */
15708 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15709 &wq_create->u.response_1);
15710 if (wq->dpp_enable) {
15711 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15712 &wq_create->u.response_1);
15713 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15715 if (!bar_memmap_p) {
15716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15717 "3267 WQ[%d] failed to memmap "
15718 "pci barset:x%x\n",
15719 wq->queue_id, pci_barset);
15723 db_offset = wq_create->u.response_1.doorbell_offset;
15724 wq->db_regaddr = bar_memmap_p + db_offset;
15725 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15726 &wq_create->u.response_1);
15727 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15728 &wq_create->u.response_1);
15729 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15731 if (!bar_memmap_p) {
15732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15733 "3268 WQ[%d] failed to memmap "
15734 "pci barset:x%x\n",
15735 wq->queue_id, dpp_barset);
15739 dpp_offset = wq_create->u.response_1.dpp_offset;
15740 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15741 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15742 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15743 "dpp_id:x%x dpp_barset:x%x "
15744 "dpp_offset:x%x\n",
15745 wq->queue_id, pci_barset, db_offset,
15746 wq->dpp_id, dpp_barset, dpp_offset);
15748 /* Enable combined writes for DPP aperture */
15749 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15751 rc = set_memory_wc(pg_addr, 1);
15753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15754 "3272 Cannot setup Combined "
15755 "Write on WQ[%d] - disable DPP\n",
15757 phba->cfg_enable_dpp = 0;
15760 phba->cfg_enable_dpp = 0;
15763 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15765 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15766 if (wq->pring == NULL) {
15770 wq->type = LPFC_WQ;
15771 wq->assoc_qid = cq->queue_id;
15772 wq->subtype = subtype;
15773 wq->host_index = 0;
15775 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
15777 /* link the wq onto the parent cq child list */
15778 list_add_tail(&wq->list, &cq->child_list);
15780 mempool_free(mbox, phba->mbox_mem_pool);
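/*
 * Usage sketch (under #if 0, not built): binding an ELS work queue to
 * its completion queue. The els_wq/els_cq names are assumed from the
 * driver's sli4_hba queue bookkeeping.
 */
#if 0
static int example_els_wq_setup(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			    phba->sli4_hba.els_cq, LPFC_ELS);
	if (rc)
		return rc;	/* -ENOMEM or -ENXIO per the kernel-doc */
	/* On success the WQ doorbell is mapped and wq->pring is allocated. */
	return 0;
}
#endif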
15785 * lpfc_rq_create - Create a Receive Queue on the HBA
15786 * @phba: HBA structure that indicates port to create a queue on.
15787 * @hrq: The queue structure to use to create the header receive queue.
15788 * @drq: The queue structure to use to create the data receive queue.
15789 * @cq: The completion queue to bind this receive queue pair to.
15791 * This function creates a receive buffer queue pair, as detailed in @hrq and
15792 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15795 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
15796 * structs are used to get the entry count that is necessary to determine the
15797 * number of pages to use for this queue. The @cq is used to indicate which
15798 * completion queue to bind received buffers that are posted to these queues to.
15799 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
15800 * receive queue pair. This function is synchronous and will wait for the
15801 * mailbox command to finish before continuing.
15803 * On success this function will return a zero. If unable to allocate enough
15804 * memory this function will return -ENOMEM. If the queue create mailbox command
15805 * fails this function will return -ENXIO.
15808 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15809 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15811 struct lpfc_mbx_rq_create *rq_create;
15812 struct lpfc_dmabuf *dmabuf;
15813 LPFC_MBOXQ_t *mbox;
15814 int rc, length, status = 0;
15815 uint32_t shdr_status, shdr_add_status;
15816 union lpfc_sli4_cfg_shdr *shdr;
15817 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15818 void __iomem *bar_memmap_p;
15819 uint32_t db_offset;
15820 uint16_t pci_barset;
15822 /* sanity check on queue memory */
15823 if (!hrq || !drq || !cq)
15825 if (!phba->sli4_hba.pc_sli4_params.supported)
15826 hw_page_size = SLI4_PAGE_SIZE;
15828 if (hrq->entry_count != drq->entry_count)
15830 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15833 length = (sizeof(struct lpfc_mbx_rq_create) -
15834 sizeof(struct lpfc_sli4_cfg_mhdr));
15835 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15836 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15837 length, LPFC_SLI4_MBX_EMBED);
15838 rq_create = &mbox->u.mqe.un.rq_create;
15839 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15840 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15841 phba->sli4_hba.pc_sli4_params.rqv);
15842 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15843 bf_set(lpfc_rq_context_rqe_count_1,
15844 &rq_create->u.request.context,
15846 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15847 bf_set(lpfc_rq_context_rqe_size,
15848 &rq_create->u.request.context,
15850 bf_set(lpfc_rq_context_page_size,
15851 &rq_create->u.request.context,
15852 LPFC_RQ_PAGE_SIZE_4096);
15854 switch (hrq->entry_count) {
15856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15857 "2535 Unsupported RQ count. (%d)\n",
15859 if (hrq->entry_count < 512) {
15863 /* otherwise default to smallest count (fall through) */
15865 bf_set(lpfc_rq_context_rqe_count,
15866 &rq_create->u.request.context,
15867 LPFC_RQ_RING_SIZE_512);
15870 bf_set(lpfc_rq_context_rqe_count,
15871 &rq_create->u.request.context,
15872 LPFC_RQ_RING_SIZE_1024);
15875 bf_set(lpfc_rq_context_rqe_count,
15876 &rq_create->u.request.context,
15877 LPFC_RQ_RING_SIZE_2048);
15880 bf_set(lpfc_rq_context_rqe_count,
15881 &rq_create->u.request.context,
15882 LPFC_RQ_RING_SIZE_4096);
15885 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15886 LPFC_HDR_BUF_SIZE);
15888 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15890 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15892 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15893 memset(dmabuf->virt, 0, hw_page_size);
15894 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15895 putPaddrLow(dmabuf->phys);
15896 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15897 putPaddrHigh(dmabuf->phys);
15899 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15900 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15902 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15903 /* The IOCTL status is embedded in the mailbox subheader. */
15904 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15905 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15906 if (shdr_status || shdr_add_status || rc) {
15907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15908 "2504 RQ_CREATE mailbox failed with "
15909 "status x%x add_status x%x, mbx status x%x\n",
15910 shdr_status, shdr_add_status, rc);
15914 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15915 if (hrq->queue_id == 0xFFFF) {
15920 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15921 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15922 &rq_create->u.response);
15923 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15924 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15925 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15926 "3262 RQ [%d] doorbell format not "
15927 "supported: x%x\n", hrq->queue_id,
15933 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15934 &rq_create->u.response);
15935 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15936 if (!bar_memmap_p) {
15937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15938 "3269 RQ[%d] failed to memmap pci "
15939 "barset:x%x\n", hrq->queue_id,
15945 db_offset = rq_create->u.response.doorbell_offset;
15946 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15947 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15948 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15949 "3270 RQ[%d] doorbell offset not "
15950 "supported: x%x\n", hrq->queue_id,
15955 hrq->db_regaddr = bar_memmap_p + db_offset;
15956 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15957 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15958 "format:x%x\n", hrq->queue_id, pci_barset,
15959 db_offset, hrq->db_format);
15961 hrq->db_format = LPFC_DB_RING_FORMAT;
15962 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15964 hrq->type = LPFC_HRQ;
15965 hrq->assoc_qid = cq->queue_id;
15966 hrq->subtype = subtype;
15967 hrq->host_index = 0;
15968 hrq->hba_index = 0;
15969 hrq->entry_repost = LPFC_RQ_REPOST;
15971 /* now create the data queue */
15972 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15973 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15974 length, LPFC_SLI4_MBX_EMBED);
15975 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15976 phba->sli4_hba.pc_sli4_params.rqv);
15977 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15978 bf_set(lpfc_rq_context_rqe_count_1,
15979 &rq_create->u.request.context, hrq->entry_count);
15980 if (subtype == LPFC_NVMET)
15981 rq_create->u.request.context.buffer_size =
15982 LPFC_NVMET_DATA_BUF_SIZE;
15984 rq_create->u.request.context.buffer_size =
15985 LPFC_DATA_BUF_SIZE;
15986 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15988 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15989 (PAGE_SIZE/SLI4_PAGE_SIZE));
15991 switch (drq->entry_count) {
15993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15994 "2536 Unsupported RQ count. (%d)\n",
15996 if (drq->entry_count < 512) {
16000 /* otherwise default to smallest count (fall through) */
16002 bf_set(lpfc_rq_context_rqe_count,
16003 &rq_create->u.request.context,
16004 LPFC_RQ_RING_SIZE_512);
16007 bf_set(lpfc_rq_context_rqe_count,
16008 &rq_create->u.request.context,
16009 LPFC_RQ_RING_SIZE_1024);
16012 bf_set(lpfc_rq_context_rqe_count,
16013 &rq_create->u.request.context,
16014 LPFC_RQ_RING_SIZE_2048);
16017 bf_set(lpfc_rq_context_rqe_count,
16018 &rq_create->u.request.context,
16019 LPFC_RQ_RING_SIZE_4096);
16022 if (subtype == LPFC_NVMET)
16023 bf_set(lpfc_rq_context_buf_size,
16024 &rq_create->u.request.context,
16025 LPFC_NVMET_DATA_BUF_SIZE);
16027 bf_set(lpfc_rq_context_buf_size,
16028 &rq_create->u.request.context,
16029 LPFC_DATA_BUF_SIZE);
16031 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16033 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16035 list_for_each_entry(dmabuf, &drq->page_list, list) {
16036 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16037 putPaddrLow(dmabuf->phys);
16038 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16039 putPaddrHigh(dmabuf->phys);
16041 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16042 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16043 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16044 /* The IOCTL status is embedded in the mailbox subheader. */
16045 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16046 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16047 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16048 if (shdr_status || shdr_add_status || rc) {
16052 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16053 if (drq->queue_id == 0xFFFF) {
16057 drq->type = LPFC_DRQ;
16058 drq->assoc_qid = cq->queue_id;
16059 drq->subtype = subtype;
16060 drq->host_index = 0;
16061 drq->hba_index = 0;
16062 drq->entry_repost = LPFC_RQ_REPOST;
16064 /* link the header and data RQs onto the parent cq child list */
16065 list_add_tail(&hrq->list, &cq->child_list);
16066 list_add_tail(&drq->list, &cq->child_list);
16069 mempool_free(mbox, phba->mbox_mem_pool);
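/*
 * Usage sketch (under #if 0, not built): creating the unsolicited
 * header/data RQ pair. Both queues must have matching entry counts or
 * the routine bails out early. The hdr_rq/dat_rq/els_cq names are
 * assumed from the driver's sli4_hba queue bookkeeping.
 */
#if 0
static int example_unsol_rq_setup(struct lpfc_hba *phba)
{
	return lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
			      phba->sli4_hba.dat_rq,
			      phba->sli4_hba.els_cq, LPFC_USOL);
}
#endif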
16074 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16075 * @phba: HBA structure that indicates port to create a queue on.
16076 * @hrqp: The queue structure array to use to create the header receive queues.
16077 * @drqp: The queue structure array to use to create the data receive queues.
16078 * @cqp: The completion queue array to bind these receive queues to.
16080 * This function creates receive buffer queue pairs, as detailed in @hrqp and
16081 * @drqp, on a port, described by @phba by sending a RQ_CREATE mailbox command
16084 * The @phba struct is used to send mailbox command to HBA. The @drqp and @hrqp
16085 * arrays are used to get the entry count that is necessary to determine the
16086 * number of pages to use for each queue. The @cqp array is used to indicate which
16087 * completion queue to bind received buffers that are posted to these queues to.
16088 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
16089 * receive queue pairs. This function is synchronous and will wait for the
16090 * mailbox command to finish before continuing.
16092 * On success this function will return a zero. If unable to allocate enough
16093 * memory this function will return -ENOMEM. If the queue create mailbox command
16094 * fails this function will return -ENXIO.
16097 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16098 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16101 struct lpfc_queue *hrq, *drq, *cq;
16102 struct lpfc_mbx_rq_create_v2 *rq_create;
16103 struct lpfc_dmabuf *dmabuf;
16104 LPFC_MBOXQ_t *mbox;
16105 int rc, length, alloclen, status = 0;
16106 int cnt, idx, numrq, page_idx = 0;
16107 uint32_t shdr_status, shdr_add_status;
16108 union lpfc_sli4_cfg_shdr *shdr;
16109 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16111 numrq = phba->cfg_nvmet_mrq;
16112 /* sanity check on array memory */
16113 if (!hrqp || !drqp || !cqp || !numrq)
16115 if (!phba->sli4_hba.pc_sli4_params.supported)
16116 hw_page_size = SLI4_PAGE_SIZE;
16118 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16122 length = sizeof(struct lpfc_mbx_rq_create_v2);
16123 length += ((2 * numrq * hrqp[0]->page_count) *
16124 sizeof(struct dma_address));
16126 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16127 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16128 LPFC_SLI4_MBX_NEMBED);
16129 if (alloclen < length) {
16130 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16131 "3099 Allocated DMA memory size (%d) is "
16132 "less than the requested DMA memory size "
16133 "(%d)\n", alloclen, length);
16140 rq_create = mbox->sge_array->addr[0];
16141 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16143 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16146 for (idx = 0; idx < numrq; idx++) {
16151 /* sanity check on queue memory */
16152 if (!hrq || !drq || !cq) {
16157 if (hrq->entry_count != drq->entry_count) {
16163 bf_set(lpfc_mbx_rq_create_num_pages,
16164 &rq_create->u.request,
16166 bf_set(lpfc_mbx_rq_create_rq_cnt,
16167 &rq_create->u.request, (numrq * 2));
16168 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16170 bf_set(lpfc_rq_context_base_cq,
16171 &rq_create->u.request.context,
16173 bf_set(lpfc_rq_context_data_size,
16174 &rq_create->u.request.context,
16175 LPFC_NVMET_DATA_BUF_SIZE);
16176 bf_set(lpfc_rq_context_hdr_size,
16177 &rq_create->u.request.context,
16178 LPFC_HDR_BUF_SIZE);
16179 bf_set(lpfc_rq_context_rqe_count_1,
16180 &rq_create->u.request.context,
16182 bf_set(lpfc_rq_context_rqe_size,
16183 &rq_create->u.request.context,
16185 bf_set(lpfc_rq_context_page_size,
16186 &rq_create->u.request.context,
16187 (PAGE_SIZE/SLI4_PAGE_SIZE));
16190 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16191 memset(dmabuf->virt, 0, hw_page_size);
16192 cnt = page_idx + dmabuf->buffer_tag;
16193 rq_create->u.request.page[cnt].addr_lo =
16194 putPaddrLow(dmabuf->phys);
16195 rq_create->u.request.page[cnt].addr_hi =
16196 putPaddrHigh(dmabuf->phys);
16202 list_for_each_entry(dmabuf, &drq->page_list, list) {
16203 memset(dmabuf->virt, 0, hw_page_size);
16204 cnt = page_idx + dmabuf->buffer_tag;
16205 rq_create->u.request.page[cnt].addr_lo =
16206 putPaddrLow(dmabuf->phys);
16207 rq_create->u.request.page[cnt].addr_hi =
16208 putPaddrHigh(dmabuf->phys);
16213 hrq->db_format = LPFC_DB_RING_FORMAT;
16214 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16215 hrq->type = LPFC_HRQ;
16216 hrq->assoc_qid = cq->queue_id;
16217 hrq->subtype = subtype;
16218 hrq->host_index = 0;
16219 hrq->hba_index = 0;
16220 hrq->entry_repost = LPFC_RQ_REPOST;
16222 drq->db_format = LPFC_DB_RING_FORMAT;
16223 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16224 drq->type = LPFC_DRQ;
16225 drq->assoc_qid = cq->queue_id;
16226 drq->subtype = subtype;
16227 drq->host_index = 0;
16228 drq->hba_index = 0;
16229 drq->entry_repost = LPFC_RQ_REPOST;
16231 list_add_tail(&hrq->list, &cq->child_list);
16232 list_add_tail(&drq->list, &cq->child_list);
16235 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16236 /* The IOCTL status is embedded in the mailbox subheader. */
16237 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16238 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16239 if (shdr_status || shdr_add_status || rc) {
16240 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16241 "3120 RQ_CREATE mailbox failed with "
16242 "status x%x add_status x%x, mbx status x%x\n",
16243 shdr_status, shdr_add_status, rc);
16247 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16248 if (rc == 0xFFFF) {
16253 /* Initialize all RQs with associated queue id */
16254 for (idx = 0; idx < numrq; idx++) {
16256 hrq->queue_id = rc + (2 * idx);
16258 drq->queue_id = rc + (2 * idx) + 1;
16262 lpfc_sli4_mbox_cmd_free(phba, mbox);
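/*
 * Usage sketch (under #if 0, not built): firmware returns a single base
 * queue id for the whole set; header RQ idx gets base + 2*idx and its
 * data RQ gets base + 2*idx + 1, per the loop above. The nvmet_* array
 * names are assumed from the driver's NVMET setup path.
 */
#if 0
static int example_nvmet_mrq_setup(struct lpfc_hba *phba)
{
	return lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
			       phba->sli4_hba.nvmet_mrq_data,
			       phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
}
#endif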
16267 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16268 * @eq: The queue structure associated with the queue to destroy.
16270 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16271 * command, specific to the type of queue, to the HBA.
16273 * The @eq struct is used to get the queue ID of the queue to destroy.
16275 * On success this function will return a zero. If the queue destroy mailbox
16276 * command fails this function will return -ENXIO.
16279 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16281 LPFC_MBOXQ_t *mbox;
16282 int rc, length, status = 0;
16283 uint32_t shdr_status, shdr_add_status;
16284 union lpfc_sli4_cfg_shdr *shdr;
16286 /* sanity check on queue memory */
16289 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16292 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16293 sizeof(struct lpfc_sli4_cfg_mhdr));
16294 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16295 LPFC_MBOX_OPCODE_EQ_DESTROY,
16296 length, LPFC_SLI4_MBX_EMBED);
16297 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16299 mbox->vport = eq->phba->pport;
16300 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16302 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16303 /* The IOCTL status is embedded in the mailbox subheader. */
16304 shdr = (union lpfc_sli4_cfg_shdr *)
16305 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16306 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16307 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16308 if (shdr_status || shdr_add_status || rc) {
16309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16310 "2505 EQ_DESTROY mailbox failed with "
16311 "status x%x add_status x%x, mbx status x%x\n",
16312 shdr_status, shdr_add_status, rc);
16316 /* Remove eq from any list */
16317 list_del_init(&eq->list);
16318 mempool_free(mbox, eq->phba->mbox_mem_pool);
16323 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16324 * @cq: The queue structure associated with the queue to destroy.
16326 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16327 * command, specific to the type of queue, to the HBA.
16329 * The @cq struct is used to get the queue ID of the queue to destroy.
16331 * On success this function will return a zero. If the queue destroy mailbox
16332 * command fails this function will return -ENXIO.
16335 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16337 LPFC_MBOXQ_t *mbox;
16338 int rc, length, status = 0;
16339 uint32_t shdr_status, shdr_add_status;
16340 union lpfc_sli4_cfg_shdr *shdr;
16342 /* sanity check on queue memory */
16345 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16348 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16349 sizeof(struct lpfc_sli4_cfg_mhdr));
16350 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16351 LPFC_MBOX_OPCODE_CQ_DESTROY,
16352 length, LPFC_SLI4_MBX_EMBED);
16353 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16355 mbox->vport = cq->phba->pport;
16356 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16357 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16358 /* The IOCTL status is embedded in the mailbox subheader. */
16359 shdr = (union lpfc_sli4_cfg_shdr *)
16360 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16361 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16362 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16363 if (shdr_status || shdr_add_status || rc) {
16364 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16365 "2506 CQ_DESTROY mailbox failed with "
16366 "status x%x add_status x%x, mbx status x%x\n",
16367 shdr_status, shdr_add_status, rc);
16370 /* Remove cq from any list */
16371 list_del_init(&cq->list);
16372 mempool_free(mbox, cq->phba->mbox_mem_pool);
16377 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16378 * @mq: The queue structure associated with the queue to destroy.
16380 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16381 * command, specific to the type of queue, to the HBA.
16383 * The @mq struct is used to get the queue ID of the queue to destroy.
16385 * On success this function will return a zero. If the queue destroy mailbox
16386 * command fails this function will return -ENXIO.
16389 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16391 LPFC_MBOXQ_t *mbox;
16392 int rc, length, status = 0;
16393 uint32_t shdr_status, shdr_add_status;
16394 union lpfc_sli4_cfg_shdr *shdr;
16396 /* sanity check on queue memory */
16399 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16402 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16403 sizeof(struct lpfc_sli4_cfg_mhdr));
16404 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16405 LPFC_MBOX_OPCODE_MQ_DESTROY,
16406 length, LPFC_SLI4_MBX_EMBED);
16407 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16409 mbox->vport = mq->phba->pport;
16410 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16411 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16412 /* The IOCTL status is embedded in the mailbox subheader. */
16413 shdr = (union lpfc_sli4_cfg_shdr *)
16414 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16415 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16417 if (shdr_status || shdr_add_status || rc) {
16418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16419 "2507 MQ_DESTROY mailbox failed with "
16420 "status x%x add_status x%x, mbx status x%x\n",
16421 shdr_status, shdr_add_status, rc);
16424 /* Remove mq from any list */
16425 list_del_init(&mq->list);
16426 mempool_free(mbox, mq->phba->mbox_mem_pool);
16431 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16432 * @wq: The queue structure associated with the queue to destroy.
16434 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16435 * command, specific to the type of queue, to the HBA.
16437 * The @wq struct is used to get the queue ID of the queue to destroy.
16439 * On success this function will return a zero. If the queue destroy mailbox
16440 * command fails this function will return -ENXIO.
16443 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16445 LPFC_MBOXQ_t *mbox;
16446 int rc, length, status = 0;
16447 uint32_t shdr_status, shdr_add_status;
16448 union lpfc_sli4_cfg_shdr *shdr;
16450 /* sanity check on queue memory */
16453 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16456 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16457 sizeof(struct lpfc_sli4_cfg_mhdr));
16458 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16459 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16460 length, LPFC_SLI4_MBX_EMBED);
16461 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16463 mbox->vport = wq->phba->pport;
16464 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16465 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16466 shdr = (union lpfc_sli4_cfg_shdr *)
16467 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16468 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16469 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16470 if (shdr_status || shdr_add_status || rc) {
16471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16472 "2508 WQ_DESTROY mailbox failed with "
16473 "status x%x add_status x%x, mbx status x%x\n",
16474 shdr_status, shdr_add_status, rc);
16477 /* Remove wq from any list */
16478 list_del_init(&wq->list);
16481 mempool_free(mbox, wq->phba->mbox_mem_pool);
16486 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16487 * @rq: The queue structure associated with the queue to destroy.
16489 * This function destroys a queue, as detailed in @rq, by sending a mailbox
16490 * command, specific to the type of queue, to the HBA.
16492 * The @rq struct is used to get the queue ID of the queue to destroy.
16494 * On success this function will return a zero. If the queue destroy mailbox
16495 * command fails this function will return -ENXIO.
16498 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16499 struct lpfc_queue *drq)
16501 LPFC_MBOXQ_t *mbox;
16502 int rc, length, status = 0;
16503 uint32_t shdr_status, shdr_add_status;
16504 union lpfc_sli4_cfg_shdr *shdr;
16506 /* sanity check on queue memory */
16509 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16512 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16513 sizeof(struct lpfc_sli4_cfg_mhdr));
16514 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16515 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16516 length, LPFC_SLI4_MBX_EMBED);
16517 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16519 mbox->vport = hrq->phba->pport;
16520 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16521 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16522 /* The IOCTL status is embedded in the mailbox subheader. */
16523 shdr = (union lpfc_sli4_cfg_shdr *)
16524 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16525 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16526 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16527 if (shdr_status || shdr_add_status || rc) {
16528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16529 "2509 RQ_DESTROY mailbox failed with "
16530 "status x%x add_status x%x, mbx status x%x\n",
16531 shdr_status, shdr_add_status, rc);
16532 if (rc != MBX_TIMEOUT)
16533 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16536 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16538 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16539 shdr = (union lpfc_sli4_cfg_shdr *)
16540 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16541 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16542 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16543 if (shdr_status || shdr_add_status || rc) {
16544 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16545 "2510 RQ_DESTROY mailbox failed with "
16546 "status x%x add_status x%x, mbx status x%x\n",
16547 shdr_status, shdr_add_status, rc);
16550 list_del_init(&hrq->list);
16551 list_del_init(&drq->list);
16552 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16557 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16558 * @phba: pointer to lpfc hba data structure.
16559 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16560 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16561 * @xritag: the xritag that ties this io to the SGL pages.
16563 * This routine will post the sgl pages for the IO that has the xritag
16564 * that is in the iocbq structure. The xritag is assigned during iocbq
16565 * creation and persists for as long as the driver is loaded.
16566 * If the caller has fewer than 256 scatter gather segments to map, then
16567 * pdma_phys_addr1 should be 0.
16568 * If the caller needs to map more than 256 scatter gather segments, then
16569 * pdma_phys_addr1 should be a valid physical address.
16570 * Physical addresses for SGLs must be 64-byte aligned.
16571 * If you are going to map 2 SGLs, then the first one must have 256 entries
16572 * and the second SGL can have between 1 and 256 entries.
16576 * -ENXIO, -ENOMEM - Failure
16579 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16580 dma_addr_t pdma_phys_addr0,
16581 dma_addr_t pdma_phys_addr1,
16584 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16585 LPFC_MBOXQ_t *mbox;
16587 uint32_t shdr_status, shdr_add_status;
16589 union lpfc_sli4_cfg_shdr *shdr;
16591 if (xritag == NO_XRI) {
16592 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16593 "0364 Invalid param:\n");
16597 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16601 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16602 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16603 sizeof(struct lpfc_mbx_post_sgl_pages) -
16604 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16606 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16607 &mbox->u.mqe.un.post_sgl_pages;
16608 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16609 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16611 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16612 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16613 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16614 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16616 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16617 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16618 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16619 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16620 if (!phba->sli4_hba.intr_enable)
16621 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16623 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16624 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16626 /* The IOCTL status is embedded in the mailbox subheader. */
16627 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16628 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16629 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16630 if (rc != MBX_TIMEOUT)
16631 mempool_free(mbox, phba->mbox_mem_pool);
16632 if (shdr_status || shdr_add_status || rc) {
16633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16634 "2511 POST_SGL mailbox failed with "
16635 "status x%x add_status x%x, mbx status x%x\n",
16636 shdr_status, shdr_add_status, rc);
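/*
 * Usage sketch (under #if 0, not built): posting a single-page SGL for
 * one XRI. The second page address is 0 per the kernel-doc above, and
 * sglq->phys must be 64-byte aligned.
 */
#if 0
static int example_post_one_sgl(struct lpfc_hba *phba,
				struct lpfc_sglq *sglq)
{
	return lpfc_sli4_post_sgl(phba, sglq->phys, 0,
				  sglq->sli4_xritag);
}
#endif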
16642 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16643 * @phba: pointer to lpfc hba data structure.
16645 * This routine is invoked to allocate the next available xri from the
16646 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
16647 * xri index is logical, so the search starts at bit 0 on every
16648 * allocation.
16651 * An available xri in the range 0 <= xri < max_xri if successful,
16652 * NO_XRI if no xris are available.
16655 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16660 * Fetch the next logical xri. Because this index is logical,
16661 * the driver starts at 0 each time.
16663 spin_lock_irq(&phba->hbalock);
16664 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16665 phba->sli4_hba.max_cfg_param.max_xri, 0);
16666 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16667 spin_unlock_irq(&phba->hbalock);
16670 set_bit(xri, phba->sli4_hba.xri_bmask);
16671 phba->sli4_hba.max_cfg_param.xri_used++;
16673 spin_unlock_irq(&phba->hbalock);
16678 * __lpfc_sli4_free_xri - Release an xri for reuse.
16679 * @phba: pointer to lpfc hba data structure.
16681 * This routine is invoked to release an xri to the pool of
16682 * available xris maintained by the driver. The hbalock must be held.
16685 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16687 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16688 phba->sli4_hba.max_cfg_param.xri_used--;
16693 * lpfc_sli4_free_xri - Release an xri for reuse.
16694 * @phba: pointer to lpfc hba data structure.
16696 * This routine is invoked to release an xri to the pool of
16697 * available xris maintained by the driver.
16700 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16702 spin_lock_irq(&phba->hbalock);
16703 __lpfc_sli4_free_xri(phba, xri);
16704 spin_unlock_irq(&phba->hbalock);
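/*
 * Usage sketch (under #if 0, not built): the allocate/release pairing
 * for the xri bitmask. lpfc_sli4_alloc_xri() returns NO_XRI when the
 * range is exhausted; lpfc_sli4_free_xri() takes the hbalock itself.
 */
#if 0
static void example_xri_cycle(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri == NO_XRI)
		return;		/* no xris left in the device's range */
	/* ... tie the xri to an I/O context, issue the I/O ... */
	lpfc_sli4_free_xri(phba, xri);
}
#endif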
16708 * lpfc_sli4_next_xritag - Get an xritag for the io
16709 * @phba: Pointer to HBA context object.
16711 * This function gets an xritag for the iocb. If there is no unused xritag
16712 * it will return NO_XRI (0xffff).
16713 * The function returns the allocated xritag if successful, else returns
16714 * NO_XRI, which is not a valid xritag.
16715 * The caller is not required to hold any lock.
16718 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16720 uint16_t xri_index;
16722 xri_index = lpfc_sli4_alloc_xri(phba);
16723 if (xri_index == NO_XRI)
16724 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16725 "2004 Failed to allocate XRI.last XRITAG is %d"
16726 " Max XRI is %d, Used XRI is %d\n",
16728 phba->sli4_hba.max_cfg_param.max_xri,
16729 phba->sli4_hba.max_cfg_param.xri_used);
16734 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16735 * @phba: pointer to lpfc hba data structure.
16736 * @post_sgl_list: pointer to els sgl entry list.
16737 * @post_cnt: number of els sgl entries on the list.
16739 * This routine is invoked to post a block of the driver's sgl pages to the
16740 * HBA using a non-embedded mailbox command. No lock is held. This routine
16741 * is only called when the driver is loading and after all IO has been stopped.
16745 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16746 struct list_head *post_sgl_list,
16749 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16750 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16751 struct sgl_page_pairs *sgl_pg_pairs;
16753 LPFC_MBOXQ_t *mbox;
16754 uint32_t reqlen, alloclen, pg_pairs;
16756 uint16_t xritag_start = 0;
16758 uint32_t shdr_status, shdr_add_status;
16759 union lpfc_sli4_cfg_shdr *shdr;
16761 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16762 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16763 if (reqlen > SLI4_PAGE_SIZE) {
16764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16765 "2559 Block sgl registration required DMA "
16766 "size (%d) great than a page\n", reqlen);
16770 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16774 /* Allocate DMA memory and set up the non-embedded mailbox command */
16775 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16776 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16777 LPFC_SLI4_MBX_NEMBED);
16779 if (alloclen < reqlen) {
16780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16781 "0285 Allocated DMA memory size (%d) is "
16782 "less than the requested DMA memory "
16783 "size (%d)\n", alloclen, reqlen);
16784 lpfc_sli4_mbox_cmd_free(phba, mbox);
16787 /* Set up the SGL pages in the non-embedded DMA pages */
16788 viraddr = mbox->sge_array->addr[0];
16789 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16790 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16793 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16794 /* Set up the sge entry */
16795 sgl_pg_pairs->sgl_pg0_addr_lo =
16796 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16797 sgl_pg_pairs->sgl_pg0_addr_hi =
16798 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16799 sgl_pg_pairs->sgl_pg1_addr_lo =
16800 cpu_to_le32(putPaddrLow(0));
16801 sgl_pg_pairs->sgl_pg1_addr_hi =
16802 cpu_to_le32(putPaddrHigh(0));
16804 /* Keep the first xritag on the list */
16806 xritag_start = sglq_entry->sli4_xritag;
16811 /* Complete initialization and perform endian conversion. */
16812 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16813 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16814 sgl->word0 = cpu_to_le32(sgl->word0);
16816 if (!phba->sli4_hba.intr_enable)
16817 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16819 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16820 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16822 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16823 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16824 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16825 if (rc != MBX_TIMEOUT)
16826 lpfc_sli4_mbox_cmd_free(phba, mbox);
16827 if (shdr_status || shdr_add_status || rc) {
16828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16829 "2513 POST_SGL_BLOCK mailbox command failed "
16830 "status x%x add_status x%x mbx status x%x\n",
16831 shdr_status, shdr_add_status, rc);
16838 * lpfc_sli4_post_common_sgl_block - post a block of nvme buffer sgls to firmware
16839 * @phba: pointer to lpfc hba data structure.
16840 * @nblist: pointer to nvme buffer list.
16841 * @count: number of nvme buffers on the list.
16843 * This routine is invoked to post a block of @count nvme sgl pages from an
16844 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
16849 lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba,
16850 struct list_head *nblist,
16853 struct lpfc_nvme_buf *lpfc_ncmd;
16854 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16855 struct sgl_page_pairs *sgl_pg_pairs;
16857 LPFC_MBOXQ_t *mbox;
16858 uint32_t reqlen, alloclen, pg_pairs;
16860 uint16_t xritag_start = 0;
16862 uint32_t shdr_status, shdr_add_status;
16863 dma_addr_t pdma_phys_bpl1;
16864 union lpfc_sli4_cfg_shdr *shdr;
16866 /* Calculate the requested length of the dma memory */
16867 reqlen = count * sizeof(struct sgl_page_pairs) +
16868 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16869 if (reqlen > SLI4_PAGE_SIZE) {
16870 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16871 "6118 Block sgl registration required DMA "
16872 "size (%d) great than a page\n", reqlen);
16875 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16877 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16878 "6119 Failed to allocate mbox cmd memory\n");
16882 /* Allocate DMA memory and set up the non-embedded mailbox command */
16883 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16884 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16885 reqlen, LPFC_SLI4_MBX_NEMBED);
16887 if (alloclen < reqlen) {
16888 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16889 "6120 Allocated DMA memory size (%d) is "
16890 "less than the requested DMA memory "
16891 "size (%d)\n", alloclen, reqlen);
16892 lpfc_sli4_mbox_cmd_free(phba, mbox);
16896 /* Get the first SGE entry from the non-embedded DMA memory */
16897 viraddr = mbox->sge_array->addr[0];
16899 /* Set up the SGL pages in the non-embedded DMA pages */
16900 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16901 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16904 list_for_each_entry(lpfc_ncmd, nblist, list) {
16905 /* Set up the sge entry */
16906 sgl_pg_pairs->sgl_pg0_addr_lo =
16907 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
16908 sgl_pg_pairs->sgl_pg0_addr_hi =
16909 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
16910 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16911 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16914 pdma_phys_bpl1 = 0;
16915 sgl_pg_pairs->sgl_pg1_addr_lo =
16916 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16917 sgl_pg_pairs->sgl_pg1_addr_hi =
16918 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16919 /* Keep the first xritag on the list */
16921 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16925 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16926 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16927 /* Perform endian conversion if necessary */
16928 sgl->word0 = cpu_to_le32(sgl->word0);
16930 if (!phba->sli4_hba.intr_enable) {
16931 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16933 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16934 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16936 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16937 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16938 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16939 if (rc != MBX_TIMEOUT)
16940 lpfc_sli4_mbox_cmd_free(phba, mbox);
16941 if (shdr_status || shdr_add_status || rc) {
16942 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16943 "6125 POST_SGL_BLOCK mailbox command failed "
16944 "status x%x add_status x%x mbx status x%x\n",
16945 shdr_status, shdr_add_status, rc);
16952 * lpfc_sli4_post_common_sgl_list - Post blocks of nvme buffer sgls from a list
16953 * @phba: pointer to lpfc hba data structure.
16954 * @post_nblist: pointer to the nvme buffer list.
16956 * This routine walks a list of nvme buffers that was passed in. It attempts
16957 * to construct blocks of nvme buffer sgls which contain contiguous xris and
16958 * uses the non-embedded SGL block post mailbox commands to post to the port.
16959 * For a single NVME buffer sgl with a non-contiguous xri, if any, it shall use
16960 * the embedded SGL post mailbox command for posting. The @post_nblist passed
16961 * in must be a local list, thus no lock is needed when manipulating the list.
16963 * Returns: 0 on failure, otherwise the number of successfully posted buffers.
16966 lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba,
16967 struct list_head *post_nblist, int sb_count)
16969 struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
16970 int status, sgl_size;
16971 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16972 dma_addr_t pdma_phys_sgl1;
16973 int last_xritag = NO_XRI;
16975 unsigned long iflag;
16976 LIST_HEAD(prep_nblist);
16977 LIST_HEAD(blck_nblist);
16978 LIST_HEAD(nvme_nblist);
16984 sgl_size = phba->cfg_sg_dma_buf_size;
16985 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16986 list_del_init(&lpfc_ncmd->list);
16988 if ((last_xritag != NO_XRI) &&
16989 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16990 /* a hole in xri block, form a sgl posting block */
16991 list_splice_init(&prep_nblist, &blck_nblist);
16992 post_cnt = block_cnt - 1;
16993 /* prepare list for next posting block */
16994 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16997 /* prepare list for next posting block */
16998 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16999 /* enough sgls for non-embed sgl mbox command */
17000 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17001 list_splice_init(&prep_nblist, &blck_nblist);
17002 post_cnt = block_cnt;
17007 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17009 /* end of repost sgl list condition for NVME buffers */
17010 if (num_posting == sb_count) {
17011 if (post_cnt == 0) {
17012 /* last sgl posting block */
17013 list_splice_init(&prep_nblist, &blck_nblist);
17014 post_cnt = block_cnt;
17015 } else if (block_cnt == 1) {
17016 /* last single sgl with non-contiguous xri */
17017 if (sgl_size > SGL_PAGE_SIZE)
17019 lpfc_ncmd->dma_phys_sgl +
17022 pdma_phys_sgl1 = 0;
17023 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17024 status = lpfc_sli4_post_sgl(
17025 phba, lpfc_ncmd->dma_phys_sgl,
17026 pdma_phys_sgl1, cur_xritag);
17028 /* failure, put on abort nvme list */
17029 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
17031 /* success, put on NVME buffer list */
17032 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
17033 lpfc_ncmd->status = IOSTAT_SUCCESS;
17036 /* success, put on NVME buffer sgl list */
17037 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17041 /* continue until a nembed page worth of sgls */
17045 /* post block of NVME buffer list sgls */
17046 status = lpfc_sli4_post_common_sgl_block(phba, &blck_nblist,
17049 /* don't reset xritag due to hole in xri block */
17050 if (block_cnt == 0)
17051 last_xritag = NO_XRI;
17053 /* reset NVME buffer post count for next round of posting */
17056 /* put NVME buffers with posted sgls on the NVME buffer sgl list */
17057 while (!list_empty(&blck_nblist)) {
17058 list_remove_head(&blck_nblist, lpfc_ncmd,
17059 struct lpfc_nvme_buf, list);
17061 /* failure, put on abort nvme list */
17062 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
17064 /* success, put on NVME buffer list */
17065 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
17066 lpfc_ncmd->status = IOSTAT_SUCCESS;
17069 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17072 /* Push NVME buffers with sgl posted to the available list */
17073 while (!list_empty(&nvme_nblist)) {
17074 list_remove_head(&nvme_nblist, lpfc_ncmd,
17075 struct lpfc_nvme_buf, list);
17076 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
17077 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
17078 spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
17079 list_add_tail(&lpfc_ncmd->list,
17080 &phba->lpfc_common_buf_list_put);
17081 phba->put_common_bufs++;
17082 spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
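/*
 * Usage sketch (under #if 0, not built): reposting a caller-private
 * list of NVME buffers. No lock is taken on the list, and the return
 * value is the number of buffers actually posted (0 on total failure).
 */
#if 0
static int example_repost_nvme_bufs(struct lpfc_hba *phba,
				    struct list_head *local_nblist, int cnt)
{
	int posted;

	posted = lpfc_sli4_post_common_sgl_list(phba, local_nblist, cnt);
	/* posted < cnt means some sgl blocks failed to post */
	return posted;
}
#endif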
17088 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17089 * @phba: pointer to lpfc_hba struct that the frame was received on
17090 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17092 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17093 * valid type of frame that the LPFC driver will handle. This function will
17094 * return a zero if the frame is a valid frame or a non zero value when the
17095 * frame does not pass the check.
17098 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17101 struct fc_vft_header *fc_vft_hdr;
17102 uint32_t *header = (uint32_t *) fc_hdr;
17104 switch (fc_hdr->fh_r_ctl) {
17105 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17106 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17107 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17108 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17109 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17110 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17111 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17112 case FC_RCTL_DD_CMD_STATUS: /* command status */
17113 case FC_RCTL_ELS_REQ: /* extended link services request */
17114 case FC_RCTL_ELS_REP: /* extended link services reply */
17115 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17116 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17117 case FC_RCTL_BA_NOP: /* basic link service NOP */
17118 case FC_RCTL_BA_ABTS: /* basic link service abort */
17119 case FC_RCTL_BA_RMC: /* remove connection */
17120 case FC_RCTL_BA_ACC: /* basic accept */
17121 case FC_RCTL_BA_RJT: /* basic reject */
17122 case FC_RCTL_BA_PRMT:
17123 case FC_RCTL_ACK_1: /* acknowledge_1 */
17124 case FC_RCTL_ACK_0: /* acknowledge_0 */
17125 case FC_RCTL_P_RJT: /* port reject */
17126 case FC_RCTL_F_RJT: /* fabric reject */
17127 case FC_RCTL_P_BSY: /* port busy */
17128 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17129 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17130 case FC_RCTL_LCR: /* link credit reset */
17131 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17132 case FC_RCTL_END: /* end */
17134 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17135 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17136 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17137 return lpfc_fc_frame_check(phba, fc_hdr);
17142 switch (fc_hdr->fh_type) {
17155 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17156 "2538 Received frame rctl:x%x, type:x%x, "
17157 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17158 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17159 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17160 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17161 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17162 be32_to_cpu(header[6]));
17165 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17166 "2539 Dropped frame rctl:x%x type:x%x\n",
17167 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
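/*
 * Illustrative sketch, assuming the layout used in the FC_RCTL_VFTH case
 * above: the encapsulated FC header is found one frame-header stride past
 * the start of the VFT header. lpfc_example_skip_vft() is hypothetical.
 */
static inline struct fc_frame_header *
lpfc_example_skip_vft(struct fc_vft_header *fc_vft_hdr)
{
	/* same cast-and-index idiom as lpfc_fc_frame_check() above */
	return &((struct fc_frame_header *)fc_vft_hdr)[1];
}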
17172 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17173 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17175 * This function processes the FC header to retrieve the VFI from the VF
17176 * header, if one exists. This function will return the VFI if one exists
17177  * or 0 if no VF tagging header exists.
17180 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17182 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17184 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17186 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17190 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17191 * @phba: Pointer to the HBA structure to search for the vport on
17192 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17193 * @fcfi: The FC Fabric ID that the frame came from
17195 * This function searches the @phba for a vport that matches the content of the
17196 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17197 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17198 * returns the matching vport pointer or NULL if unable to match frame to a
17201 static struct lpfc_vport *
17202 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17203 uint16_t fcfi, uint32_t did)
17205 struct lpfc_vport **vports;
17206 struct lpfc_vport *vport = NULL;
17209 if (did == Fabric_DID)
17210 return phba->pport;
17211 if ((phba->pport->fc_flag & FC_PT2PT) &&
17212 !(phba->link_state == LPFC_HBA_READY))
17213 return phba->pport;
17215 vports = lpfc_create_vport_work_array(phba);
17216 if (vports != NULL) {
17217 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17218 if (phba->fcf.fcfi == fcfi &&
17219 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17220 vports[i]->fc_myDID == did) {
17226 lpfc_destroy_vport_work_array(phba, vports);
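/*
 * Illustrative usage (hypothetical caller): resolve the destination vport
 * before any per-vport processing, dropping the frame when no match is
 * found, as lpfc_sli4_handle_received_buffer() does further below:
 *
 *	did = sli4_did_from_fc_hdr(fc_hdr);
 *	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
 *	if (!vport)
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 */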
17231 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17232 * @vport: The vport to work on.
17234 * This function updates the receive sequence time stamp for this vport. The
17235 * receive sequence time stamp indicates the time that the last frame of the
17236  * sequence that has been idle for the longest amount of time was received.
17237  * The driver uses this time stamp to indicate if any received sequences have
17241 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17243 struct lpfc_dmabuf *h_buf;
17244 struct hbq_dmabuf *dmabuf = NULL;
17246 /* get the oldest sequence on the rcv list */
17247 h_buf = list_get_first(&vport->rcv_buffer_list,
17248 struct lpfc_dmabuf, list);
17251 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17252 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17256 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17257 * @vport: The vport that the received sequences were sent to.
17259 * This function cleans up all outstanding received sequences. This is called
17260 * by the driver when a link event or user action invalidates all the received
17264 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17266 struct lpfc_dmabuf *h_buf, *hnext;
17267 struct lpfc_dmabuf *d_buf, *dnext;
17268 struct hbq_dmabuf *dmabuf = NULL;
17270 /* start with the oldest sequence on the rcv list */
17271 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17272 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17273 list_del_init(&dmabuf->hbuf.list);
17274 list_for_each_entry_safe(d_buf, dnext,
17275 &dmabuf->dbuf.list, list) {
17276 list_del_init(&d_buf->list);
17277 lpfc_in_buf_free(vport->phba, d_buf);
17279 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17284 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17285 * @vport: The vport that the received sequences were sent to.
17287 * This function determines whether any received sequences have timed out by
17288 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17289 * indicates that there is at least one timed out sequence this routine will
17290 * go through the received sequences one at a time from most inactive to most
17291 * active to determine which ones need to be cleaned up. Once it has determined
17292 * that a sequence needs to be cleaned up it will simply free up the resources
17293 * without sending an abort.
17296 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17298 struct lpfc_dmabuf *h_buf, *hnext;
17299 struct lpfc_dmabuf *d_buf, *dnext;
17300 struct hbq_dmabuf *dmabuf = NULL;
17301 unsigned long timeout;
17302 int abort_count = 0;
17304 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17305 vport->rcv_buffer_time_stamp);
17306 if (list_empty(&vport->rcv_buffer_list) ||
17307 time_before(jiffies, timeout))
17309 /* start with the oldest sequence on the rcv list */
17310 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17311 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17312 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17313 dmabuf->time_stamp);
17314 if (time_before(jiffies, timeout))
17317 list_del_init(&dmabuf->hbuf.list);
17318 list_for_each_entry_safe(d_buf, dnext,
17319 &dmabuf->dbuf.list, list) {
17320 list_del_init(&d_buf->list);
17321 lpfc_in_buf_free(vport->phba, d_buf);
17323 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17326 lpfc_update_rcv_time_stamp(vport);
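/*
 * Illustrative sketch of the E_D_TOV expiry test applied above; fc_edtov
 * is kept in milliseconds and converted to jiffies before comparison.
 * lpfc_example_seq_timed_out() is hypothetical.
 */
static inline bool lpfc_example_seq_timed_out(unsigned long time_stamp,
					      uint32_t fc_edtov_ms)
{
	return time_after_eq(jiffies,
			     time_stamp + msecs_to_jiffies(fc_edtov_ms));
}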
17330 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17331 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17333 * This function searches through the existing incomplete sequences that have
17334 * been sent to this @vport. If the frame matches one of the incomplete
17335 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17336 * make up that sequence. If no sequence is found that matches this frame then
17337  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17338 * This function returns a pointer to the first dmabuf in the sequence list that
17339 * the frame was linked to.
17341 static struct hbq_dmabuf *
17342 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17344 struct fc_frame_header *new_hdr;
17345 struct fc_frame_header *temp_hdr;
17346 struct lpfc_dmabuf *d_buf;
17347 struct lpfc_dmabuf *h_buf;
17348 struct hbq_dmabuf *seq_dmabuf = NULL;
17349 struct hbq_dmabuf *temp_dmabuf = NULL;
17352 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17353 dmabuf->time_stamp = jiffies;
17354 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17356 /* Use the hdr_buf to find the sequence that this frame belongs to */
17357 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17358 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17359 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17360 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17361 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17363 /* found a pending sequence that matches this frame */
17364 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17369 * This indicates first frame received for this sequence.
17370 * Queue the buffer on the vport's rcv_buffer_list.
17372 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17373 lpfc_update_rcv_time_stamp(vport);
17376 temp_hdr = seq_dmabuf->hbuf.virt;
17377 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17378 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17379 list_del_init(&seq_dmabuf->hbuf.list);
17380 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17381 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17382 lpfc_update_rcv_time_stamp(vport);
17385 /* move this sequence to the tail to indicate a young sequence */
17386 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17387 seq_dmabuf->time_stamp = jiffies;
17388 lpfc_update_rcv_time_stamp(vport);
17389 if (list_empty(&seq_dmabuf->dbuf.list)) {
17390 temp_hdr = dmabuf->hbuf.virt;
17391 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17394 /* find the correct place in the sequence to insert this frame */
17395 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17397 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17398 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17400 * If the frame's sequence count is greater than the frame on
17401 * the list then insert the frame right after this frame
17403 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17404 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17405 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17410 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17412 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
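/*
 * Illustrative sketch: the three-tuple used above to decide whether a new
 * frame belongs to a pending sequence (same SEQ_ID, OX_ID and 3-byte S_ID).
 * lpfc_example_same_seq() is hypothetical.
 */
static inline bool lpfc_example_same_seq(const struct fc_frame_header *a,
					 const struct fc_frame_header *b)
{
	return a->fh_seq_id == b->fh_seq_id &&
	       a->fh_ox_id == b->fh_ox_id &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}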
17421 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17422  * @vport: pointer to a virtual port
17423  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17425  * This function tries to abort the partially assembled sequence described
17426  * by the information in the basic abort @dmabuf. It checks whether such a
17427  * partially assembled sequence is held by the driver. If so, it frees up all
17428 * the frames from the partially assembled sequence.
17431  * true -- a matching partially assembled sequence was present and all
17432  * of its frames were freed;
17433  * false -- no matching partially assembled sequence was present, so
17434  * nothing was aborted in the lower layer driver
17437 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17438 struct hbq_dmabuf *dmabuf)
17440 struct fc_frame_header *new_hdr;
17441 struct fc_frame_header *temp_hdr;
17442 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17443 struct hbq_dmabuf *seq_dmabuf = NULL;
17445 /* Use the hdr_buf to find the sequence that matches this frame */
17446 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17447 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17448 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17449 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17450 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17451 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17452 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17453 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17455 /* found a pending sequence that matches this frame */
17456 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17460 /* Free up all the frames from the partially assembled sequence */
17462 list_for_each_entry_safe(d_buf, n_buf,
17463 &seq_dmabuf->dbuf.list, list) {
17464 list_del_init(&d_buf->list);
17465 lpfc_in_buf_free(vport->phba, d_buf);
17473 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17474  * @vport: pointer to a virtual port
17475  * @dmabuf: pointer to a dmabuf that describes the FC sequence
17477  * This function tries to abort an assembled sequence at the upper level
17478  * protocol, described by the information in the basic abort @dmabuf. It
17479  * checks whether such a pending context exists at the upper level protocol.
17480 * If so, it shall clean up the pending context.
17483 * true -- if there is matching pending context of the sequence cleaned
17485 * false -- if there is no matching pending context of the sequence present
17489 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17491 struct lpfc_hba *phba = vport->phba;
17494 /* Accepting abort at ulp with SLI4 only */
17495 if (phba->sli_rev < LPFC_SLI_REV4)
17498 /* Register all caring upper level protocols to attend abort */
17499 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17507 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17508 * @phba: Pointer to HBA context object.
17509 * @cmd_iocbq: pointer to the command iocbq structure.
17510 * @rsp_iocbq: pointer to the response iocbq structure.
17512 * This function handles the sequence abort response iocb command complete
17513 * event. It properly releases the memory allocated to the sequence abort
17517 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17518 struct lpfc_iocbq *cmd_iocbq,
17519 struct lpfc_iocbq *rsp_iocbq)
17521 struct lpfc_nodelist *ndlp;
17524 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17525 lpfc_nlp_put(ndlp);
17526 lpfc_nlp_not_used(ndlp);
17527 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17530 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17531 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17532 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17533 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17534 rsp_iocbq->iocb.ulpStatus,
17535 rsp_iocbq->iocb.un.ulpWord[4]);
17539 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17540 * @phba: Pointer to HBA context object.
17541 * @xri: xri id in transaction.
17543  * This function validates that the xri maps to the known range of XRIs allocated and
17544 * used by the driver.
17547 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17552 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17553 if (xri == phba->sli4_hba.xri_ids[i])
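/*
 * Illustrative sketch of the linear search above, mapping a physical xri
 * back to its logical index; a sentinel is returned when no entry matches.
 * lpfc_example_xri_to_lxri() and its sentinel are hypothetical.
 */
static inline uint16_t lpfc_example_xri_to_lxri(const uint16_t *xri_ids,
						uint16_t count, uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < count; i++)
		if (xri_ids[i] == xri)
			return i;
	return 0xffff;	/* plays the role of NO_XRI */
}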
17560 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17561 * @phba: Pointer to HBA context object.
17562 * @fc_hdr: pointer to a FC frame header.
17564 * This function sends a basic response to a previous unsol sequence abort
17565 * event after aborting the sequence handling.
17568 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17569 struct fc_frame_header *fc_hdr, bool aborted)
17571 struct lpfc_hba *phba = vport->phba;
17572 struct lpfc_iocbq *ctiocb = NULL;
17573 struct lpfc_nodelist *ndlp;
17574 uint16_t oxid, rxid, xri, lxri;
17575 uint32_t sid, fctl;
17579 if (!lpfc_is_link_up(phba))
17582 sid = sli4_sid_from_fc_hdr(fc_hdr);
17583 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17584 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17586 ndlp = lpfc_findnode_did(vport, sid);
17588 ndlp = lpfc_nlp_init(vport, sid);
17590 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17591 "1268 Failed to allocate ndlp for "
17592 "oxid:x%x SID:x%x\n", oxid, sid);
17595 /* Put ndlp onto pport node list */
17596 lpfc_enqueue_node(vport, ndlp);
17597 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17598 /* re-setup ndlp without removing from node list */
17599 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17601 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17602 "3275 Failed to active ndlp found "
17603 "for oxid:x%x SID:x%x\n", oxid, sid);
17608 /* Allocate buffer for rsp iocb */
17609 ctiocb = lpfc_sli_get_iocbq(phba);
17613 /* Extract the F_CTL field from FC_HDR */
17614 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17616 icmd = &ctiocb->iocb;
17617 icmd->un.xseq64.bdl.bdeSize = 0;
17618 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17619 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17620 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17621 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17623 /* Fill in the rest of iocb fields */
17624 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17625 icmd->ulpBdeCount = 0;
17627 icmd->ulpClass = CLASS3;
17628 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17629 ctiocb->context1 = lpfc_nlp_get(ndlp);
17631 ctiocb->iocb_cmpl = NULL;
17632 ctiocb->vport = phba->pport;
17633 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17634 ctiocb->sli4_lxritag = NO_XRI;
17635 ctiocb->sli4_xritag = NO_XRI;
17637 if (fctl & FC_FC_EX_CTX)
17638 /* Exchange responder sent the abort so we
17644 lxri = lpfc_sli4_xri_inrange(phba, xri);
17645 if (lxri != NO_XRI)
17646 lpfc_set_rrq_active(phba, ndlp, lxri,
17647 (xri == oxid) ? rxid : oxid, 0);
17648 /* For BA_ABTS from exchange responder, if the logical xri with
17649 * the oxid maps to the FCP XRI range, the port no longer has
17650 * that exchange context, send a BLS_RJT. Override the IOCB for
17653 if ((fctl & FC_FC_EX_CTX) &&
17654 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17655 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17656 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17657 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17658 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17661 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17662 * the driver no longer has that exchange, send a BLS_RJT. Override
17663 * the IOCB for a BA_RJT.
17665 	if (!aborted) {
17666 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17667 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17668 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17669 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17672 if (fctl & FC_FC_EX_CTX) {
17673 /* ABTS sent by responder to CT exchange, construction
17674 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17675 * field and RX_ID from ABTS for RX_ID field.
17677 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17679 /* ABTS sent by initiator to CT exchange, construction
17680 * of BA_ACC will need to allocate a new XRI as for the
17683 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17685 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17686 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17688 /* Xmit CT abts response on exchange <xid> */
17689 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17690 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17691 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17693 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17694 if (rc == IOCB_ERROR) {
17695 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17696 "2925 Failed to issue CT ABTS RSP x%x on "
17697 "xri x%x, Data x%x\n",
17698 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17700 lpfc_nlp_put(ndlp);
17701 ctiocb->context1 = NULL;
17702 lpfc_sli_release_iocbq(phba, ctiocb);
17707 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17708 * @vport: Pointer to the vport on which this sequence was received
17709 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17711 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17712  * receive sequence is only partially assembled by the driver, it shall abort
17713  * the partially assembled frames for the sequence. Otherwise, if the
17714  * unsolicited receive sequence has been completely assembled and passed to
17715  * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
17716  * unsolicited sequence as aborted. After that, it will issue a basic
17717 * accept to accept the abort.
17720 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17721 struct hbq_dmabuf *dmabuf)
17723 struct lpfc_hba *phba = vport->phba;
17724 struct fc_frame_header fc_hdr;
17728 /* Make a copy of fc_hdr before the dmabuf being released */
17729 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17730 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17732 if (fctl & FC_FC_EX_CTX) {
17733 /* ABTS by responder to exchange, no cleanup needed */
17736 /* ABTS by initiator to exchange, need to do cleanup */
17737 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17738 	if (!aborted)
17739 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17741 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17743 if (phba->nvmet_support) {
17744 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17748 /* Respond with BA_ACC or BA_RJT accordingly */
17749 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17753 * lpfc_seq_complete - Indicates if a sequence is complete
17754 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17756 * This function checks the sequence, starting with the frame described by
17757 * @dmabuf, to see if all the frames associated with this sequence are present.
17758  * The frames associated with this sequence are linked to the @dmabuf using the
17759  * dbuf list. This function looks for three major things. 1) That the first frame
17760  * has a sequence count of zero. 2) That there is a frame with the last frame
17761  * of sequence bit set. 3) That there are no holes in the sequence count. The function will
17762 * return 1 when the sequence is complete, otherwise it will return 0.
17765 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17767 struct fc_frame_header *hdr;
17768 struct lpfc_dmabuf *d_buf;
17769 struct hbq_dmabuf *seq_dmabuf;
17773 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17774 	/* make sure the first frame of the sequence has a sequence count of zero */
17775 if (hdr->fh_seq_cnt != seq_count)
17777 fctl = (hdr->fh_f_ctl[0] << 16 |
17778 hdr->fh_f_ctl[1] << 8 |
17780 /* If last frame of sequence we can return success. */
17781 if (fctl & FC_FC_END_SEQ)
17783 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17784 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17785 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17786 /* If there is a hole in the sequence count then fail. */
17787 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17789 fctl = (hdr->fh_f_ctl[0] << 16 |
17790 hdr->fh_f_ctl[1] << 8 |
17792 /* If last frame of sequence we can return success. */
17793 if (fctl & FC_FC_END_SEQ)
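/*
 * Illustrative sketch: F_CTL is a 24-bit field carried in the three bytes
 * fh_f_ctl[0..2]; lpfc_seq_complete() above assembles it exactly this way
 * before testing FC_FC_END_SEQ. lpfc_example_fctl() is hypothetical.
 */
static inline uint32_t lpfc_example_fctl(const struct fc_frame_header *hdr)
{
	return (hdr->fh_f_ctl[0] << 16) |
	       (hdr->fh_f_ctl[1] << 8) |
		hdr->fh_f_ctl[2];
}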
17800 * lpfc_prep_seq - Prep sequence for ULP processing
17801 * @vport: Pointer to the vport on which this sequence was received
17802 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17804 * This function takes a sequence, described by a list of frames, and creates
17805 * a list of iocbq structures to describe the sequence. This iocbq list will be
17806 * used to issue to the generic unsolicited sequence handler. This routine
17807 * returns a pointer to the first iocbq in the list. If the function is unable
17808 * to allocate an iocbq then it throw out the received frames that were not
17809 * able to be described and return a pointer to the first iocbq. If unable to
17810 * allocate any iocbqs (including the first) this function will return NULL.
17812 static struct lpfc_iocbq *
17813 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17815 struct hbq_dmabuf *hbq_buf;
17816 struct lpfc_dmabuf *d_buf, *n_buf;
17817 struct lpfc_iocbq *first_iocbq, *iocbq;
17818 struct fc_frame_header *fc_hdr;
17820 uint32_t len, tot_len;
17821 struct ulp_bde64 *pbde;
17823 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17824 /* remove from receive buffer list */
17825 list_del_init(&seq_dmabuf->hbuf.list);
17826 lpfc_update_rcv_time_stamp(vport);
17827 /* get the Remote Port's SID */
17828 sid = sli4_sid_from_fc_hdr(fc_hdr);
17830 /* Get an iocbq struct to fill in. */
17831 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17833 /* Initialize the first IOCB. */
17834 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17835 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17836 first_iocbq->vport = vport;
17838 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17839 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17840 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17841 first_iocbq->iocb.un.rcvels.parmRo =
17842 sli4_did_from_fc_hdr(fc_hdr);
17843 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17845 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17846 first_iocbq->iocb.ulpContext = NO_XRI;
17847 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17848 be16_to_cpu(fc_hdr->fh_ox_id);
17849 /* iocbq is prepped for internal consumption. Physical vpi. */
17850 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17851 vport->phba->vpi_ids[vport->vpi];
17852 /* put the first buffer into the first IOCBq */
17853 tot_len = bf_get(lpfc_rcqe_length,
17854 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17856 first_iocbq->context2 = &seq_dmabuf->dbuf;
17857 first_iocbq->context3 = NULL;
17858 first_iocbq->iocb.ulpBdeCount = 1;
17859 if (tot_len > LPFC_DATA_BUF_SIZE)
17860 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17861 LPFC_DATA_BUF_SIZE;
17863 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17865 first_iocbq->iocb.un.rcvels.remoteID = sid;
17867 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17869 iocbq = first_iocbq;
17871 * Each IOCBq can have two Buffers assigned, so go through the list
17872 * of buffers for this sequence and save two buffers in each IOCBq
17874 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17876 lpfc_in_buf_free(vport->phba, d_buf);
17879 if (!iocbq->context3) {
17880 iocbq->context3 = d_buf;
17881 iocbq->iocb.ulpBdeCount++;
17882 /* We need to get the size out of the right CQE */
17883 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17884 len = bf_get(lpfc_rcqe_length,
17885 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17886 pbde = (struct ulp_bde64 *)
17887 &iocbq->iocb.unsli3.sli3Words[4];
17888 if (len > LPFC_DATA_BUF_SIZE)
17889 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17891 pbde->tus.f.bdeSize = len;
17893 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17896 iocbq = lpfc_sli_get_iocbq(vport->phba);
17899 first_iocbq->iocb.ulpStatus =
17900 IOSTAT_FCP_RSP_ERROR;
17901 first_iocbq->iocb.un.ulpWord[4] =
17902 IOERR_NO_RESOURCES;
17904 lpfc_in_buf_free(vport->phba, d_buf);
17907 /* We need to get the size out of the right CQE */
17908 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17909 len = bf_get(lpfc_rcqe_length,
17910 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17911 iocbq->context2 = d_buf;
17912 iocbq->context3 = NULL;
17913 iocbq->iocb.ulpBdeCount = 1;
17914 if (len > LPFC_DATA_BUF_SIZE)
17915 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17916 LPFC_DATA_BUF_SIZE;
17918 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17921 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17923 iocbq->iocb.un.rcvels.remoteID = sid;
17924 list_add_tail(&iocbq->list, &first_iocbq->list);
17927 return first_iocbq;
17931 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17932 struct hbq_dmabuf *seq_dmabuf)
17934 struct fc_frame_header *fc_hdr;
17935 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17936 struct lpfc_hba *phba = vport->phba;
17938 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17939 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17941 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17942 "2707 Ring %d handler: Failed to allocate "
17943 "iocb Rctl x%x Type x%x received\n",
17945 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17948 if (!lpfc_complete_unsol_iocb(phba,
17949 phba->sli4_hba.els_wq->pring,
17950 iocbq, fc_hdr->fh_r_ctl,
17952 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17953 "2540 Ring %d handler: unexpected Rctl "
17954 "x%x Type x%x received\n",
17956 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17958 /* Free iocb created in lpfc_prep_seq */
17959 list_for_each_entry_safe(curr_iocb, next_iocb,
17960 &iocbq->list, list) {
17961 list_del_init(&curr_iocb->list);
17962 lpfc_sli_release_iocbq(phba, curr_iocb);
17964 lpfc_sli_release_iocbq(phba, iocbq);
17968 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17969 struct lpfc_iocbq *rspiocb)
17971 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17973 if (pcmd && pcmd->virt)
17974 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17976 lpfc_sli_release_iocbq(phba, cmdiocb);
17977 lpfc_drain_txq(phba);
17981 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17982 struct hbq_dmabuf *dmabuf)
17984 struct fc_frame_header *fc_hdr;
17985 struct lpfc_hba *phba = vport->phba;
17986 struct lpfc_iocbq *iocbq = NULL;
17987 union lpfc_wqe *wqe;
17988 struct lpfc_dmabuf *pcmd = NULL;
17989 uint32_t frame_len;
17991 unsigned long iflags;
17993 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17994 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17996 /* Send the received frame back */
17997 iocbq = lpfc_sli_get_iocbq(phba);
17999 /* Queue cq event and wakeup worker thread to process it */
18000 spin_lock_irqsave(&phba->hbalock, iflags);
18001 list_add_tail(&dmabuf->cq_event.list,
18002 &phba->sli4_hba.sp_queue_event);
18003 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18004 spin_unlock_irqrestore(&phba->hbalock, iflags);
18005 lpfc_worker_wake_up(phba);
18009 /* Allocate buffer for command payload */
18010 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18012 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18014 if (!pcmd || !pcmd->virt)
18017 INIT_LIST_HEAD(&pcmd->list);
18019 /* copyin the payload */
18020 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18022 /* fill in BDE's for command */
18023 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18024 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18025 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18026 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18028 iocbq->context2 = pcmd;
18029 iocbq->vport = vport;
18030 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18031 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18034 * Setup rest of the iocb as though it were a WQE
18035 * Build the SEND_FRAME WQE
18037 wqe = (union lpfc_wqe *)&iocbq->iocb;
18039 wqe->send_frame.frame_len = frame_len;
18040 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18041 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18042 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18043 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18044 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18045 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18047 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18048 iocbq->iocb.ulpLe = 1;
18049 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18050 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18051 if (rc == IOCB_ERROR)
18054 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18058 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18059 "2023 Unable to process MDS loopback frame\n");
18060 if (pcmd && pcmd->virt)
18061 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18064 lpfc_sli_release_iocbq(phba, iocbq);
18065 lpfc_in_buf_free(phba, &dmabuf->dbuf);
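/*
 * Illustrative sketch: the SEND_FRAME WQE above carries the six 32-bit FC
 * header words in CPU order, so each big-endian word is converted as it is
 * copied. lpfc_example_hdr_to_cpu() is hypothetical.
 */
static inline void lpfc_example_hdr_to_cpu(uint32_t *dst, const __be32 *src)
{
	int i;

	for (i = 0; i < 6; i++)
		dst[i] = be32_to_cpu(src[i]);
}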
18069 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18070 * @phba: Pointer to HBA context object.
18072  * This function is called with no lock held. It processes all
18073  * the received buffers and gives them to the upper layers when a received
18074  * buffer indicates that it is the final frame in the sequence. The interrupt
18075  * service routine processes received buffers in interrupt context. The
18076  * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18077 * appropriate receive function when the final frame in a sequence is received.
18080 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18081 struct hbq_dmabuf *dmabuf)
18083 struct hbq_dmabuf *seq_dmabuf;
18084 struct fc_frame_header *fc_hdr;
18085 struct lpfc_vport *vport;
18089 /* Process each received buffer */
18090 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18092 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18093 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18094 vport = phba->pport;
18095 /* Handle MDS Loopback frames */
18096 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18100 /* check to see if this a valid type of frame */
18101 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18102 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18106 if ((bf_get(lpfc_cqe_code,
18107 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18108 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18109 &dmabuf->cq_event.cqe.rcqe_cmpl);
18111 fcfi = bf_get(lpfc_rcqe_fcf_id,
18112 &dmabuf->cq_event.cqe.rcqe_cmpl);
18114 /* d_id this frame is directed to */
18115 did = sli4_did_from_fc_hdr(fc_hdr);
18117 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18119 /* throw out the frame */
18120 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18124 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18125 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18126 (did != Fabric_DID)) {
18128 * Throw out the frame if we are not pt2pt.
18129 * The pt2pt protocol allows for discovery frames
18130 * to be received without a registered VPI.
18132 if (!(vport->fc_flag & FC_PT2PT) ||
18133 (phba->link_state == LPFC_HBA_READY)) {
18134 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18139 /* Handle the basic abort sequence (BA_ABTS) event */
18140 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18141 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18145 /* Link this frame */
18146 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18148 /* unable to add frame to vport - throw it out */
18149 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18152 /* If not last frame in sequence continue processing frames. */
18153 if (!lpfc_seq_complete(seq_dmabuf))
18156 /* Send the complete sequence to the upper layer protocol */
18157 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18161 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18162 * @phba: pointer to lpfc hba data structure.
18164 * This routine is invoked to post rpi header templates to the
18165 * HBA consistent with the SLI-4 interface spec. This routine
18166 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18167  * SLI4_PAGE_SIZE / 64 rpi context headers.
18169  * This routine does not require any locks. Its usage is expected
18170  * to be at driver load or during reset recovery when the driver is
18175 * -EIO - The mailbox failed to complete successfully.
18176 * When this error occurs, the driver is not guaranteed
18177 * to have any rpi regions posted to the device and
18178 * must either attempt to repost the regions or take a
18182 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18184 struct lpfc_rpi_hdr *rpi_page;
18188 /* SLI4 ports that support extents do not require RPI headers. */
18189 if (!phba->sli4_hba.rpi_hdrs_in_use)
18191 if (phba->sli4_hba.extents_in_use)
18194 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18196 * Assign the rpi headers a physical rpi only if the driver
18197 * has not initialized those resources. A port reset only
18198 * needs the headers posted.
18200 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18202 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18204 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18205 if (rc != MBX_SUCCESS) {
18206 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18207 "2008 Error %d posting all rpi "
18215 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18216 LPFC_RPI_RSRC_RDY);
18221 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18222 * @phba: pointer to lpfc hba data structure.
18223 * @rpi_page: pointer to the rpi memory region.
18225 * This routine is invoked to post a single rpi header to the
18226 * HBA consistent with the SLI-4 interface spec. This memory region
18227 * maps up to 64 rpi context regions.
18231 * -ENOMEM - No available memory
18232 * -EIO - The mailbox failed to complete successfully.
18235 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18237 LPFC_MBOXQ_t *mboxq;
18238 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18240 uint32_t shdr_status, shdr_add_status;
18241 union lpfc_sli4_cfg_shdr *shdr;
18243 /* SLI4 ports that support extents do not require RPI headers. */
18244 if (!phba->sli4_hba.rpi_hdrs_in_use)
18246 if (phba->sli4_hba.extents_in_use)
18249 /* The port is notified of the header region via a mailbox command. */
18250 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18252 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18253 "2001 Unable to allocate memory for issuing "
18254 "SLI_CONFIG_SPECIAL mailbox command\n");
18258 /* Post all rpi memory regions to the port. */
18259 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18260 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18261 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18262 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18263 sizeof(struct lpfc_sli4_cfg_mhdr),
18264 LPFC_SLI4_MBX_EMBED);
18267 /* Post the physical rpi to the port for this rpi header. */
18268 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18269 rpi_page->start_rpi);
18270 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18271 hdr_tmpl, rpi_page->page_count);
18273 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18274 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18275 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18276 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18277 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18278 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18279 if (rc != MBX_TIMEOUT)
18280 mempool_free(mboxq, phba->mbox_mem_pool);
18281 if (shdr_status || shdr_add_status || rc) {
18282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18283 "2514 POST_RPI_HDR mailbox failed with "
18284 "status x%x add_status x%x, mbx status x%x\n",
18285 shdr_status, shdr_add_status, rc);
18289 	 * The next_rpi stores the next logical modulo-64 rpi value used
18290 * to post physical rpis in subsequent rpi postings.
18292 spin_lock_irq(&phba->hbalock);
18293 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18294 spin_unlock_irq(&phba->hbalock);
18300 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18301 * @phba: pointer to lpfc hba data structure.
18303  * This routine is invoked to allocate an rpi from the range of rpis
18304  * maintained by the driver. When the driver runs low on rpi resources,
18305  * it also allocates and posts another rpi header region to the port so
18306  * that subsequent allocations can succeed.
18309  * A nonzero rpi, defined as rpi_base <= rpi < max_rpi, if successful
18310 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18313 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18316 uint16_t max_rpi, rpi_limit;
18317 uint16_t rpi_remaining, lrpi = 0;
18318 struct lpfc_rpi_hdr *rpi_hdr;
18319 unsigned long iflag;
18322 * Fetch the next logical rpi. Because this index is logical,
18323 * the driver starts at 0 each time.
18325 spin_lock_irqsave(&phba->hbalock, iflag);
18326 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18327 rpi_limit = phba->sli4_hba.next_rpi;
18329 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18330 if (rpi >= rpi_limit)
18331 rpi = LPFC_RPI_ALLOC_ERROR;
18333 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18334 phba->sli4_hba.max_cfg_param.rpi_used++;
18335 phba->sli4_hba.rpi_count++;
18337 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18338 "0001 rpi:%x max:%x lim:%x\n",
18339 (int) rpi, max_rpi, rpi_limit);
18342 * Don't try to allocate more rpi header regions if the device limit
18343 * has been exhausted.
18345 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18346 (phba->sli4_hba.rpi_count >= max_rpi)) {
18347 spin_unlock_irqrestore(&phba->hbalock, iflag);
18352 * RPI header postings are not required for SLI4 ports capable of
18355 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18356 spin_unlock_irqrestore(&phba->hbalock, iflag);
18361 * If the driver is running low on rpi resources, allocate another
18362 * page now. Note that the next_rpi value is used because
18363 * it represents how many are actually in use whereas max_rpi notes
18364 	 * the maximum supported by the device.
18366 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18367 spin_unlock_irqrestore(&phba->hbalock, iflag);
18368 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18369 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18371 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18372 "2002 Error Could not grow rpi "
18375 lrpi = rpi_hdr->start_rpi;
18376 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18377 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
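/*
 * Illustrative sketch of the find-and-set bitmap allocation idiom used by
 * lpfc_sli4_alloc_rpi() above; the caller is assumed to hold the lock that
 * protects the bitmap. lpfc_example_alloc_id() is hypothetical.
 */
static inline int lpfc_example_alloc_id(unsigned long *bmask,
					unsigned int limit)
{
	unsigned int id = find_next_zero_bit(bmask, limit, 0);

	if (id >= limit)
		return -1;	/* pool exhausted */
	set_bit(id, bmask);
	return id;
}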
18385 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18386 * @phba: pointer to lpfc hba data structure.
18388 * This routine is invoked to release an rpi to the pool of
18389 * available rpis maintained by the driver.
18392 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18394 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18395 phba->sli4_hba.rpi_count--;
18396 phba->sli4_hba.max_cfg_param.rpi_used--;
18401 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18402 * @phba: pointer to lpfc hba data structure.
18404 * This routine is invoked to release an rpi to the pool of
18405 * available rpis maintained by the driver.
18408 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18410 spin_lock_irq(&phba->hbalock);
18411 __lpfc_sli4_free_rpi(phba, rpi);
18412 spin_unlock_irq(&phba->hbalock);
18416 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18417 * @phba: pointer to lpfc hba data structure.
18419  * This routine is invoked to free the rpi bitmask and rpi id
18420  * resources maintained by the driver.
18423 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18425 kfree(phba->sli4_hba.rpi_bmask);
18426 kfree(phba->sli4_hba.rpi_ids);
18427 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18431  * lpfc_sli4_resume_rpi - Resume an rpi with the port
18432  * @ndlp: pointer to the node whose rpi is to be resumed.
18434  * This routine is invoked to issue a RESUME_RPI mailbox command to the
18435  * port for the rpi associated with @ndlp.
18438 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18439 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18441 LPFC_MBOXQ_t *mboxq;
18442 struct lpfc_hba *phba = ndlp->phba;
18445 /* The port is notified of the header region via a mailbox command. */
18446 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18450 /* Post all rpi memory regions to the port. */
18451 lpfc_resume_rpi(mboxq, ndlp);
18453 mboxq->mbox_cmpl = cmpl;
18454 mboxq->ctx_buf = arg;
18455 mboxq->ctx_ndlp = ndlp;
18457 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18458 mboxq->vport = ndlp->vport;
18459 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18460 if (rc == MBX_NOT_FINISHED) {
18461 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18462 "2010 Resume RPI Mailbox failed "
18463 "status %d, mbxStatus x%x\n", rc,
18464 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18465 mempool_free(mboxq, phba->mbox_mem_pool);
18472 * lpfc_sli4_init_vpi - Initialize a vpi with the port
18473 * @vport: Pointer to the vport for which the vpi is being initialized
18475 * This routine is invoked to activate a vpi with the port.
18479 * -Evalue otherwise
18482 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18484 LPFC_MBOXQ_t *mboxq;
18486 int retval = MBX_SUCCESS;
18488 struct lpfc_hba *phba = vport->phba;
18489 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18492 lpfc_init_vpi(phba, mboxq, vport->vpi);
18493 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18494 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18495 if (rc != MBX_SUCCESS) {
18496 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18497 "2022 INIT VPI Mailbox failed "
18498 "status %d, mbxStatus x%x\n", rc,
18499 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18502 if (rc != MBX_TIMEOUT)
18503 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18509 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18510 * @phba: pointer to lpfc hba data structure.
18511 * @mboxq: Pointer to mailbox object.
18513  * This routine is the completion handler for the ADD_FCF_RECORD non-embedded
18514  * mailbox command. It checks the completion status in the mailbox subheader
18515  * and frees the nonembedded mailbox resources.
18518 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18521 union lpfc_sli4_cfg_shdr *shdr;
18522 uint32_t shdr_status, shdr_add_status;
18524 virt_addr = mboxq->sge_array->addr[0];
18525 /* The IOCTL status is embedded in the mailbox subheader. */
18526 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18527 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18528 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18530 if ((shdr_status || shdr_add_status) &&
18531 (shdr_status != STATUS_FCF_IN_USE))
18532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18533 "2558 ADD_FCF_RECORD mailbox failed with "
18534 "status x%x add_status x%x\n",
18535 shdr_status, shdr_add_status);
18537 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18541 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18542 * @phba: pointer to lpfc hba data structure.
18543 * @fcf_record: pointer to the initialized fcf record to add.
18545 * This routine is invoked to manually add a single FCF record. The caller
18546 * must pass a completely initialized FCF_Record. This routine takes
18547 * care of the nonembedded mailbox operations.
18550 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18553 LPFC_MBOXQ_t *mboxq;
18556 struct lpfc_mbx_sge sge;
18557 uint32_t alloc_len, req_len;
18560 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18563 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18567 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18570 /* Allocate DMA memory and set up the non-embedded mailbox command */
18571 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18572 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18573 req_len, LPFC_SLI4_MBX_NEMBED);
18574 if (alloc_len < req_len) {
18575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18576 "2523 Allocated DMA memory size (x%x) is "
18577 "less than the requested DMA memory "
18578 "size (x%x)\n", alloc_len, req_len);
18579 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18584 * Get the first SGE entry from the non-embedded DMA memory. This
18585 * routine only uses a single SGE.
18587 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18588 virt_addr = mboxq->sge_array->addr[0];
18590 * Configure the FCF record for FCFI 0. This is the driver's
18591 * hardcoded default and gets used in nonFIP mode.
18593 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18594 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18595 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18598 * Copy the fcf_index and the FCF Record Data. The data starts after
18599 * the FCoE header plus word10. The data copy needs to be endian
18602 bytep += sizeof(uint32_t);
18603 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18604 mboxq->vport = phba->pport;
18605 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18606 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18607 if (rc == MBX_NOT_FINISHED) {
18608 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18609 "2515 ADD_FCF_RECORD mailbox failed with "
18610 "status 0x%x\n", rc);
18611 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18620 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18621 * @phba: pointer to lpfc hba data structure.
18622 * @fcf_record: pointer to the fcf record to write the default data.
18623 * @fcf_index: FCF table entry index.
18625 * This routine is invoked to build the driver's default FCF record. The
18626 * values used are hardcoded. This routine handles memory initialization.
18630 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18631 struct fcf_record *fcf_record,
18632 uint16_t fcf_index)
18634 memset(fcf_record, 0, sizeof(struct fcf_record));
18635 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18636 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18637 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18638 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18639 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18640 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18641 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18642 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18643 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18644 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18645 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18646 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18647 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18648 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18649 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18650 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18651 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18652 /* Set the VLAN bit map */
18653 if (phba->valid_vlan) {
18654 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18655 = 1 << (phba->vlan_id % 8);
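/*
 * Illustrative sketch of the VLAN bitmap math used just above: vlan_id
 * selects byte vlan_id / 8 and bit vlan_id % 8 within that byte.
 * lpfc_example_set_vlan() is hypothetical.
 */
static inline void lpfc_example_set_vlan(uint8_t *vlan_bitmap,
					 uint16_t vlan_id)
{
	vlan_bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}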
18660 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18661 * @phba: pointer to lpfc hba data structure.
18662 * @fcf_index: FCF table entry offset.
18664 * This routine is invoked to scan the entire FCF table by reading FCF
18665 * record and processing it one at a time starting from the @fcf_index
18666 * for initial FCF discovery or fast FCF failover rediscovery.
18668  * Return 0 if the mailbox command is submitted successfully, nonzero
18672 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18675 LPFC_MBOXQ_t *mboxq;
18677 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18678 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18679 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18682 "2000 Failed to allocate mbox for "
18685 goto fail_fcf_scan;
18687 /* Construct the read FCF record mailbox command */
18688 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18691 goto fail_fcf_scan;
18693 /* Issue the mailbox command asynchronously */
18694 mboxq->vport = phba->pport;
18695 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18697 spin_lock_irq(&phba->hbalock);
18698 phba->hba_flag |= FCF_TS_INPROG;
18699 spin_unlock_irq(&phba->hbalock);
18701 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18702 if (rc == MBX_NOT_FINISHED)
18705 /* Reset eligible FCF count for new scan */
18706 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18707 phba->fcf.eligible_fcf_cnt = 0;
18713 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18714 /* FCF scan failed, clear FCF_TS_INPROG flag */
18715 spin_lock_irq(&phba->hbalock);
18716 phba->hba_flag &= ~FCF_TS_INPROG;
18717 spin_unlock_irq(&phba->hbalock);
18723 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18724 * @phba: pointer to lpfc hba data structure.
18725 * @fcf_index: FCF table entry offset.
18727 * This routine is invoked to read an FCF record indicated by @fcf_index
18728 * and to use it for FLOGI roundrobin FCF failover.
18730  * Return 0 if the mailbox command is submitted successfully, nonzero
18734 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18737 LPFC_MBOXQ_t *mboxq;
18739 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18741 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18742 "2763 Failed to allocate mbox for "
18745 goto fail_fcf_read;
18747 /* Construct the read FCF record mailbox command */
18748 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18751 goto fail_fcf_read;
18753 /* Issue the mailbox command asynchronously */
18754 mboxq->vport = phba->pport;
18755 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18756 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18757 if (rc == MBX_NOT_FINISHED)
18763 if (error && mboxq)
18764 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18769 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18770 * @phba: pointer to lpfc hba data structure.
18771 * @fcf_index: FCF table entry offset.
18773 * This routine is invoked to read an FCF record indicated by @fcf_index to
18774 * determine whether it's eligible for FLOGI roundrobin failover list.
18776  * Return 0 if the mailbox command is submitted successfully, nonzero
18780 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18783 LPFC_MBOXQ_t *mboxq;
18785 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18787 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18788 "2758 Failed to allocate mbox for "
18791 goto fail_fcf_read;
18793 /* Construct the read FCF record mailbox command */
18794 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18797 goto fail_fcf_read;
18799 /* Issue the mailbox command asynchronously */
18800 mboxq->vport = phba->pport;
18801 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18802 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18803 if (rc == MBX_NOT_FINISHED)
18809 if (error && mboxq)
18810 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18815 * lpfc_check_next_fcf_pri_level
18816  * @phba: pointer to the lpfc_hba struct for this port.
18817  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18818  * routine when the rr_bmask is empty. The FCF indices are put into the
18819 * rr_bmask based on their priority level. Starting from the highest priority
18820 * to the lowest. The most likely FCF candidate will be in the highest
18821 * priority group. When this routine is called it searches the fcf_pri list for
18822 * next lowest priority group and repopulates the rr_bmask with only those
18825 * 1=success 0=failure
18828 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18830 uint16_t next_fcf_pri;
18831 uint16_t last_index;
18832 struct lpfc_fcf_pri *fcf_pri;
18836 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18837 LPFC_SLI4_FCF_TBL_INDX_MAX);
18838 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18839 "3060 Last IDX %d\n", last_index);
18841 /* Verify the priority list has 2 or more entries */
18842 spin_lock_irq(&phba->hbalock);
18843 if (list_empty(&phba->fcf.fcf_pri_list) ||
18844 list_is_singular(&phba->fcf.fcf_pri_list)) {
18845 spin_unlock_irq(&phba->hbalock);
18846 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18847 "3061 Last IDX %d\n", last_index);
18848 return 0; /* Empty rr list */
18850 spin_unlock_irq(&phba->hbalock);
18854 * Clear the rr_bmask and set all of the bits that are at this
18857 memset(phba->fcf.fcf_rr_bmask, 0,
18858 sizeof(*phba->fcf.fcf_rr_bmask));
18859 spin_lock_irq(&phba->hbalock);
18860 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18861 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18864 		 * the first priority that has not failed FLOGI
18865 		 * will be the highest.
18868 next_fcf_pri = fcf_pri->fcf_rec.priority;
18869 spin_unlock_irq(&phba->hbalock);
18870 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18871 rc = lpfc_sli4_fcf_rr_index_set(phba,
18872 fcf_pri->fcf_rec.fcf_index);
18876 spin_lock_irq(&phba->hbalock);
18879 	 * If next_fcf_pri was not set above and the list is not empty, then
18880 	 * we have failed FLOGI on all of them. So reset the FLOGI-failed flag
18881 	 * and start at the beginning.
18883 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18884 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18885 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18887 			 * the first priority that has not failed FLOGI
18888 			 * will be the highest.
18891 next_fcf_pri = fcf_pri->fcf_rec.priority;
18892 spin_unlock_irq(&phba->hbalock);
18893 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18894 rc = lpfc_sli4_fcf_rr_index_set(phba,
18895 fcf_pri->fcf_rec.fcf_index);
18899 spin_lock_irq(&phba->hbalock);
18903 spin_unlock_irq(&phba->hbalock);
18908 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18909 * @phba: pointer to lpfc hba data structure.
18911  * This routine is used to get the next eligible FCF record index in a round
18912  * robin fashion. If the next eligible FCF record index equals the
18913 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18914 * shall be returned, otherwise, the next eligible FCF record's index
18915 * shall be returned.
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");

		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
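
/*
 * Hypothetical caller sketch (the real FLOGI failover path differs):
 * roundrobin selection keeps asking for the next eligible index until
 * the bitmap is exhausted:
 *
 *	next = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (next == LPFC_FCOE_FCF_NEXT_NONE)
 *		give up roundrobin failover;
 *	else
 *		read FCF record "next" and attempt FLOGI against it;
 */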

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}
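
/*
 * Illustrative pairing, not a verbatim call site: the set/clear helpers
 * above keep rr_bmask in sync with FCF discovery events, e.g.
 *
 *	if (!lpfc_sli4_fcf_rr_index_set(phba, fcf_index))
 *		the index is now eligible for roundrobin selection;
 *	...
 *	lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
 *
 * where the clear would run when the FCF record disappears from the
 * table (for example on an FCF DEAD event).
 */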

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
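
/*
 * Hypothetical usage sketch: a DEAD-FCF handler could request a full
 * table rediscovery and fall back if the mailbox cannot be queued,
 * mirroring the pairing the completion handler above performs when the
 * mailbox itself fails:
 *
 *	if (lpfc_sli4_redisc_fcf_table(phba))
 *		lpfc_sli4_fcf_dead_failthrough(phba);
 */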

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
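
/*
 * Note on the SLI4 variant above (a sketch, not a driver call site):
 * lpfc_sli4_dump_cfg_rg23() attaches a DMA buffer to the mailbox
 * (mboxq->ctx_buf); the port writes the region data there and reports
 * the byte count back in mailbox word 5. A hypothetical caller only
 * needs a flat buffer:
 *
 *	char *buf = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	uint32_t len = buf ? lpfc_sli4_get_config_region23(phba, buf) : 0;
 *	if (len)
 *		parse "len" bytes of region 23 from buf;
 *	kfree(buf);
 */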

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
}
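
/*
 * For reference, the region 23 layout implied by the parser above
 * (byte offsets; record lengths are in 32-bit words):
 *
 *	[0..3]  LPFC_REGION23_SIGNATURE
 *	[4]     LPFC_REGION23_VERSION
 *	[8..]   TLV records: byte 0 = type, byte 1 = length in words,
 *	        payload follows. A DRIVER_SPECIFIC_TYPE record carrying
 *	        LINUX_DRIVER_ID holds sub-TLVs; a PORT_STE_TYPE sub-TLV
 *	        whose first payload byte is zero marks the port as
 *	        user-disabled (LINK_DISABLED).
 */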

/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful and offset will contain the new offset to use
 * for the next write.
 * Return negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status, shdr_change_status;
	uint32_t mbox_tmo;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;
	bool check_change_status = false;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &wr_object->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &wr_object->header.cfg_shdr.response);
	if (check_change_status) {
		shdr_change_status = bf_get(lpfc_wr_object_change_status,
					    &wr_object->u.response);
		switch (shdr_change_status) {
		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3198 Firmware write complete: System "
					"reboot required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_FW_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3199 Firmware write complete: Firmware"
					" reset required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3200 Firmware write complete: Port "
					"Migration or PCI Reset required to "
					"instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PCI_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3201 Firmware write complete: PCI "
					"Reset required to instantiate\n");
			break;
		default:
			break;
		}
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
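
/*
 * Hypothetical usage sketch (the real caller is the firmware download
 * path, which manages its own buffer lists): an object larger than one
 * embedded mailbox worth of BDEs is written in successive calls, with
 * *offset carried between them:
 *
 *	uint32_t offset = 0;
 *	while (offset < fw_size) {
 *		if (lpfc_wr_object(phba, &dma_buffer_list, fw_size, &offset))
 *			break;		- on error, offset holds add_status
 *	}
 *
 * "fw_size" and "dma_buffer_list" stand in for the caller's image length
 * and its list of pre-mapped lpfc_dmabuf pages covering the next chunk.
 */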

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mb->ctx_buf = NULL;
			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			mb->ctx_ndlp = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	uint32_t txq_cnt = 0;
	struct lpfc_queue *wq;

	if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQE are posted only to first WQ*/
		wq = phba->sli4_hba.fcp_wq[0];
		if (unlikely(!wq))
			return 0;
		pring = wq->pring;
	} else {
		wq = phba->sli4_hba.els_wq;
		if (unlikely(!wq))
			return 0;
		pring = lpfc_phba_elsring(phba);
	}

	if (unlikely(!pring) || list_empty(&pring->txq))
		return 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}
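
/*
 * Illustrative flow, not a verbatim call site: an SLI4 ELS issue path
 * that cannot get an SGL defers the IOCB to the txq, and this routine
 * could be invoked later, once resources free up, to retry:
 *
 *	if (!__lpfc_sli_get_els_sglq(phba, piocb))
 *		__lpfc_sli_ringtx_put(phba, pring, piocb);	- defer
 *	...
 *	lpfc_drain_txq(phba);		- retry deferred ELS IOCBs
 */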

/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the WQE contains a single
 * BDE then it is converted to a single sli_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
					bpl->tus.f.bdeFlags);
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}
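
/*
 * Worked example with assumed values, for illustration only: a
 * CMD_GEN_REQUEST64_WQE with numBdes == 2, a 64-byte outbound BDE
 * followed by a 256-byte inbound (BUFF_TYPE_BDE_64I) BDE, maps to:
 *
 *	SGE0: type DATA, offset 0, sge_len 64,  last = 0
 *	SGE1: type DATA, offset 0, sge_len 256, last = 1
 *
 * SGE1's offset restarts at zero because the "inbound" counter above
 * resets the accumulated offset at the first reply entry.
 */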

/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @ring_number: Base sli ring number
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int ret = 0;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}

		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}
	return WQE_ERROR;
}