// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"

#define DRV_NAME	"octeontx2-af"
#define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"
#define DRV_VERSION	"1.0"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *));

enum {
	TYPE_AFVF,
	TYPE_AFPF,
};

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;

	if (is_rvu_96xx_B0(rvu)) {
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
	}
}

/* Poll a RVU block's register 'offset', for a 'zero'
 * or 'nonzero' at bits specified by 'mask'
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(10000);
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
	while (time_before(jiffies, timeout)) {
		reg_val = readq(reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1, 5);
	}
	return -EBUSY;
}

int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX0;
	}

	/* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT0;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				u16 lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->type) {
	case BLKTYPE_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKTYPE_NIX:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKTYPE_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKTYPE_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKTYPE_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKTYPE_CPT:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}

inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	*numvfs = (cfg >> 12) & 0xFF;
	*hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}

static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}

int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}

static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get, to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}

static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;

	return 0;
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

	mutex_destroy(&rvu->rsrc_lock);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

nix:
	/* Init NIX LF's bitmap */
	block = &hw->block[BLKADDR_NIX0];
	if (!block->implemented)
		goto sso;
	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = BLKADDR_NIX0;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

sso:
	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

cpt:
	/* Init CPT LF's bitmap */
	block = &hw->block[BLKADDR_CPT0];
	if (!block->implemented)
		goto init;
	cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = BLKADDR_CPT0;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT");
	err = rvu_alloc_bitmap(&block->lf);
	if (err)
		return err;

init:
	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf)
		return -ENOMEM;

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf)
		return -ENOMEM;

	mutex_init(&rvu->rsrc_lock);

	err = rvu_setup_msix_resources(rvu);
	if (err)
		return err;

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map)
			return -ENOMEM;

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_npc_init(rvu);
	if (err)
		return err;

	err = rvu_cgx_init(rvu);
	if (err)
		return err;

	err = rvu_npa_init(rvu);
	if (err)
		return err;

	err = rvu_nix_init(rvu);
	if (err)
		return err;

	return 0;
}

/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions i.e AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}

int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
{
	return 0;
}

/* Get current count of a RVU block's LF/slots
 * provisioned to a given RVU func.
 */
static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return pfvf->sso;
	case BLKTYPE_SSOW:
		return pfvf->ssow;
	case BLKTYPE_TIM:
		return pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs;
	}
	return 0;
}

bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf;

	if (!is_pf_func_valid(rvu, pcifunc))
		return false;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Check if this PFFUNC has a LF of type blktype attached */
	if (!rvu_get_rsrc_mapcount(pfvf, blktype))
		return false;

	return true;
}

static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);
	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}

static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
				      struct rsrc_detach *detach,
				      struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}

static void rvu_attach_block(struct rvu *rvu, int pcifunc,
			     int blktype, int num_lfs)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int free_lfs, mappedlfs;

	/* Only one NPA LF can be attached */
	if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
		block = &hw->block[BLKADDR_NIX0];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		block = &hw->block[BLKADDR_CPT0];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}

int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees them as slots 0,1,2. So for a
		 * 'modify' request, simply detach all existing attached
		 * LFs/slots and attach them afresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
	}

	if (attach->cptlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}

static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (!pfvf->msix_lfmap)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}

int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
	rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	return 0;
}

int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
			    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u16 vf, numvfs;
	u64 cfg;

	vf = pcifunc & RVU_PFVF_FUNC_MASK;
	cfg = rvu_read64(rvu, BLKADDR_RVUM,
			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
	numvfs = (cfg >> 12) & 0xFF;

	if (vf && vf <= numvfs)
		__rvu_flr_handler(rvu, pcifunc);
	else
		return RVU_INVALID_VF_ID;

	return 0;
}

int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
				struct get_hw_cap_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;

	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
	rsp->nix_shaping = hw->cap.nix_shaping;

	return 0;
}

static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			mbox, devid,					\
			sizeof(struct _rsp_type));			\
		/* some handlers should complete even if reply */	\
		/* could not be allocated */				\
		if (!rsp &&						\
		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
		    _id != MBOX_MSG_VF_FLR)				\
			return -ENOMEM;					\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
						    (struct _req_type *)req, \
						    rsp);		\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M

bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}

static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc));
	}
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}

static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFVF);
}

static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}

static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *))
{
	void __iomem *hwbase = NULL, *reg_base;
	int err, i, dir, dir_up;
	struct rvu_work *mwork;
	const char *name;
	u64 bar4_addr;

	switch (type) {
	case TYPE_AFPF:
		name = "rvu_afpf_mailbox";
		bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
		dir = MBOX_DIR_AFPF;
		dir_up = MBOX_DIR_AFPF_UP;
		reg_base = rvu->afreg_base;
		break;
	case TYPE_AFVF:
		name = "rvu_afvf_mailbox";
		bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
		dir = MBOX_DIR_PFVF;
		dir_up = MBOX_DIR_PFVF_UP;
		reg_base = rvu->pfreg_base;
		break;
	default:
		return -EINVAL;
	}

	mw->mbox_wq = alloc_workqueue(name,
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      num);
	if (!mw->mbox_wq)
		return -ENOMEM;

	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
				       sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk_up) {
		err = -ENOMEM;
		goto exit;
	}

	/* Mailbox is a reserved memory (in RAM) region shared between
	 * RVU devices, shouldn't be mapped as device memory to allow
	 * unaligned accesses.
	 */
	hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
	if (!hwbase) {
		dev_err(rvu->dev, "Unable to map mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
			     reg_base, dir_up, num);
	if (err)
		goto exit;

	for (i = 0; i < num; i++) {
		mwork = &mw->mbox_wrk[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_handler);

		mwork = &mw->mbox_wrk_up[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_up_handler);
	}

	return 0;
exit:
	if (hwbase)
		iounmap((void __iomem *)hwbase);
	destroy_workqueue(mw->mbox_wq);
	return err;
}

static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
	if (mw->mbox_wq) {
		flush_workqueue(mw->mbox_wq);
		destroy_workqueue(mw->mbox_wq);
		mw->mbox_wq = NULL;
	}

	if (mw->mbox.hwbase)
		iounmap((void __iomem *)mw->mbox.hwbase);

	otx2_mbox_destroy(&mw->mbox);
	otx2_mbox_destroy(&mw->mbox_up);
}

static void rvu_queue_work(struct mbox_wq_info *mw, int first,
			   int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* start from 0 */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;

		/* The hdr->num_msgs is set to zero immediately in the
		 * interrupt handler to ensure that it holds a correct value
		 * next time when the interrupt handler is called.
		 * mw->mbox_wrk[i].num_msgs holds the data for use in
		 * the mbox handler and mw->mbox_wrk_up[i].up_num_msgs
		 * holds the data for use in the mbox up handler.
		 */
		if (hdr->num_msgs) {
			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
		}
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
		}
	}
}

static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs -= 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}

static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0 i.e. AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}

static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int err;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->type);
	if (!num_lfs)
		return;
	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (lf < 0)
			continue;

		/* Cleanup LF and reset it */
		if (block->addr == BLKADDR_NIX0)
			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
		else if (block->addr == BLKADDR_NPA)
			rvu_npa_lf_teardown(rvu, pcifunc, lf);

		err = rvu_lf_reset(rvu, block, lf);
		if (err) {
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, lf);
		}
	}
}

static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	mutex_lock(&rvu->flr_lock);
	/* Reset order should reflect inter-block dependencies:
	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
	 * 2. Flush and reset SSO/SSOW
	 * 3. Cleanup pools (NPA)
	 */
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	mutex_unlock(&rvu->flr_lock);
}

static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
	int reg = 0;

	/* pcifunc = 0(PF0) | (vf + 1) */
	__rvu_flr_handler(rvu, vf + 1);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}

	/* Signal FLR finish and enable IRQ */
	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

static void rvu_flr_handler(struct work_struct *work)
{
	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = flrwork->rvu;
	u16 pcifunc, numvfs, vf;
	u64 cfg;
	int pf;

	pf = flrwork - rvu->flr_wrk;
	if (pf >= rvu->hw->total_pfs) {
		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
		return;
	}

	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	numvfs = (cfg >> 12) & 0xFF;
	pcifunc = pf << RVU_PFVF_PF_SHIFT;

	for (vf = 0; vf < numvfs; vf++)
		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));

	__rvu_flr_handler(rvu, pcifunc);

	/* Signal FLR finish */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));

	/* Enable interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
}

static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
{
	int dev, vf, reg = 0;
	u64 intr;

	if (start_vf >= 64)
		reg = 1;

	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
	if (!intr)
		return;

	for (vf = 0; vf < numvfs; vf++) {
		if (!(intr & BIT_ULL(vf)))
			continue;
		dev = vf + start_vf + rvu->hw->total_pfs;
		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
		/* Clear and disable the interrupt */
		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
	}
}

static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
	if (!intr)
		goto afvf_flr;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* PF is already dead, do only AF related operations */
			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
				    BIT_ULL(pf));
			/* Disable the interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
				    BIT_ULL(pf));
		}
	}

afvf_flr:
	rvu_afvf_queue_flr_work(rvu, 0, 64);
	if (rvu->vfs > 64)
		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

	return IRQ_HANDLED;
}

static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
{
	int vf;

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (vf = 0; vf < 64; vf++) {
		if (intr & (1ULL << vf)) {
			/* clear the trpend due to ME(master enable) */
			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
			/* clear interrupt */
			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
		}
	}
}

/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}

/* Handles ME interrupts from PFs */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear the trpend due to ME(master enable) */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
				    BIT_ULL(pf));
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
				    BIT_ULL(pf));
		}
	}

	return IRQ_HANDLED;
}

static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF FLR interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF ME interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq])
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}

static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. Offset equal to zero means
	 * that PF vectors are not configured and overlapping AF vectors.
	 */
	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
	       offset;
}

static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret, offset, pf_vec_start;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	/* Register FLR interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
		"RVUAF FLR");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for FLR\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

	/* Enable FLR interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Register ME interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
		"RVUAF ME");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
			  rvu_me_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for ME\n");
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

	/* Enable ME interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	if (!rvu_afvf_msix_vectors_num_ok(rvu))
		return 0;

	/* Get PF MSIX vectors offset. */
	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Register MBOX0 interrupt. */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox0\n");

	rvu->irq_allocated[offset] = true;

	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
	 * simply increment current offset by 1.
	 */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox1\n");

	rvu->irq_allocated[offset] = true;

	/* Register FLR interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register ME interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	return 0;

fail:
	rvu_unregister_interrupts(rvu);
	return ret;
}

static void rvu_flr_wq_destroy(struct rvu *rvu)
{
	if (rvu->flr_wq) {
		flush_workqueue(rvu->flr_wq);
		destroy_workqueue(rvu->flr_wq);
		rvu->flr_wq = NULL;
	}
}

static int rvu_flr_init(struct rvu *rvu)
{
	int dev, num_devs;
	u64 cfg;
	int pf;

	/* Enable FLR for all PFs */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
			    cfg | BIT_ULL(22));
	}

	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      1);
	if (!rvu->flr_wq)
		return -ENOMEM;

	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->flr_wrk) {
		destroy_workqueue(rvu->flr_wq);
		return -ENOMEM;
	}

	for (dev = 0; dev < num_devs; dev++) {
		rvu->flr_wrk[dev].rvu = rvu;
		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
	}

	mutex_init(&rvu->flr_lock);

	return 0;
}

static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR and ME */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

#define PCI_DEVID_OCTEONTX2_LBK 0xA061

static int lbk_get_num_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}

static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = lbk_get_num_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	/* AF's VFs work in pairs and talk over consecutive loopback channels.
	 * Thus we want to enable maximum even number of VFs. In case
	 * odd number of VFs are available then the last VF on the list
	 * remains disabled.
	 */
	if (vfs & 0x1) {
		dev_warn(&pdev->dev,
			 "Number of VFs should be even. Enabling %d out of %d.\n",
			 vfs - 1, vfs);
		vfs--;
	}

	if (!vfs)
		return 0;

	/* Save VFs number for reference in VF interrupts handlers.
	 * Since interrupts might start arriving during SRIOV enablement
	 * ordinary API cannot be used to get number of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}

static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}

static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
}

static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to set DMA mask\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to set consistent DMA mask\n");
		goto err_release_regions;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_release_regions;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err)
		goto err_hwsetup;

	err = rvu_flr_init(rvu);
	if (err)
		goto err_mbox;

	err = rvu_register_interrupts(rvu);
	if (err)
		goto err_flr;

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err)
		goto err_irq;

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	return 0;
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}

static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
	.name     = DRV_NAME,
	.id_table = rvu_id_table,
	.probe    = rvu_probe,
	.remove   = rvu_remove,
};

static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		pci_unregister_driver(&cgx_driver);

	return err;
}

static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);