// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"

#define DRV_NAME        "octeontx2-af"
#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
#define DRV_VERSION     "1.0"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
                         int type, int num,
                         void (mbox_handler)(struct work_struct *),
                         void (mbox_up_handler)(struct work_struct *));
enum {
        TYPE_AFVF,
        TYPE_AFPF,
};

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
        { 0, }  /* end of table */
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

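/* Note (editorial summary, inferred from the B0 overrides below): on
 * 96xx B0 silicon the NIX transmit scheduler hierarchy has a fixed
 * LF-to-scheduler-queue mapping and traffic shaping / TX link
 * backpressure are not usable, hence the reduced capability set that
 * rvu_setup_hw_capabilities() programs for that revision.
 */
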
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;

        hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
        hw->cap.nix_fixed_txschq_mapping = false;
        hw->cap.nix_shaping = true;
        hw->cap.nix_tx_link_bp = true;

        if (is_rvu_96xx_B0(rvu)) {
                hw->cap.nix_fixed_txschq_mapping = true;
                hw->cap.nix_txsch_per_cgx_lmac = 4;
                hw->cap.nix_txsch_per_lbk_lmac = 132;
                hw->cap.nix_txsch_per_sdp_lmac = 76;
                hw->cap.nix_shaping = false;
                hw->cap.nix_tx_link_bp = false;
        }
}

/* Poll an RVU block's register 'offset' until the bits specified by
 * 'mask' read as 'zero' or 'nonzero', or a 10ms timeout expires.
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
        unsigned long timeout = jiffies + usecs_to_jiffies(10000);
        void __iomem *reg;
        u64 reg_val;

        reg = rvu->afreg_base + ((block << 28) | offset);
        while (time_before(jiffies, timeout)) {
                reg_val = readq(reg);
                if (zero && !(reg_val & mask))
                        return 0;
                if (!zero && (reg_val & mask))
                        return 0;
                usleep_range(1, 5);
        }
        return -EBUSY;
}

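/* Example caller (illustrative only): rvu_lf_reset() below waits for an
 * LF reset to complete by polling the block's LF-reset register until
 * the busy bit clears:
 *
 *      rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
 *      err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg,
 *                         BIT_ULL(12), true);
 */
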
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
        int id;

        if (!rsrc->bmap)
                return -EINVAL;

        id = find_first_zero_bit(rsrc->bmap, rsrc->max);
        if (id >= rsrc->max)
                return -ENOSPC;

        __set_bit(id, rsrc->bmap);

        return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return -EINVAL;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return -ENOSPC;

        bitmap_set(rsrc->bmap, start, nrsrc);
        return start;
}

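/* Note: rvu_alloc_rsrc()/rvu_alloc_rsrc_contig() are simple first-fit
 * allocators over a plain kernel bitmap; callers serialize via
 * rvu->rsrc_lock wherever concurrent attach/detach is possible (see
 * rvu_detach_rsrcs()). A contiguous allocation is paired with a
 * matching free, e.g. (illustrative):
 *
 *      offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
 *      ...
 *      rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
 */
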
static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
        if (!rsrc->bmap)
                return;
        if (start >= rsrc->max)
                return;

        bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return false;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return false;

        return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
        if (!rsrc->bmap)
                return;

        __clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
        int used;

        if (!rsrc->bmap)
                return 0;

        used = bitmap_weight(rsrc->bmap, rsrc->max);
        return (rsrc->max - used);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
        rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
                             sizeof(long), GFP_KERNEL);
        if (!rsrc->bmap)
                return -ENOMEM;
        return 0;
}

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
        u16 match = 0;
        int lf;

        mutex_lock(&rvu->rsrc_lock);
        for (lf = 0; lf < block->lf.max; lf++) {
                if (block->fn_map[lf] == pcifunc) {
                        if (slot == match) {
                                mutex_unlock(&rvu->rsrc_lock);
                                return lf;
                        }
                        match++;
                }
        }
        mutex_unlock(&rvu->rsrc_lock);
        return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTx2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
        int devnum, blkaddr = -ENODEV;
        u64 cfg, reg;
        bool is_pf;

        switch (blktype) {
        case BLKTYPE_NPC:
                blkaddr = BLKADDR_NPC;
                goto exit;
        case BLKTYPE_NPA:
                blkaddr = BLKADDR_NPA;
                goto exit;
        case BLKTYPE_NIX:
                /* For now assume NIX0 */
                if (!pcifunc) {
                        blkaddr = BLKADDR_NIX0;
                        goto exit;
                }
                break;
        case BLKTYPE_SSO:
                blkaddr = BLKADDR_SSO;
                goto exit;
        case BLKTYPE_SSOW:
                blkaddr = BLKADDR_SSOW;
                goto exit;
        case BLKTYPE_TIM:
                blkaddr = BLKADDR_TIM;
                goto exit;
        case BLKTYPE_CPT:
                /* For now assume CPT0 */
                if (!pcifunc) {
                        blkaddr = BLKADDR_CPT0;
                        goto exit;
                }
                break;
        }

        /* Check if this is an RVU PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK) {
                is_pf = false;
                devnum = rvu_get_hwvf(rvu, pcifunc);
        } else {
                is_pf = true;
                devnum = rvu_get_pf(pcifunc);
        }

        /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
        if (blktype == BLKTYPE_NIX) {
                reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg)
                        blkaddr = BLKADDR_NIX0;
        }

        /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
        if (blktype == BLKTYPE_CPT) {
                reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg)
                        blkaddr = BLKADDR_CPT0;
        }

exit:
        if (is_block_implemented(rvu->hw, blkaddr))
                return blkaddr;
        return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, u16 pcifunc,
                                u16 lf, bool attach)
{
        int devnum, num_lfs = 0;
        bool is_pf;
        u64 reg;

        if (lf >= block->lf.max) {
                dev_err(&rvu->pdev->dev,
                        "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
                        __func__, lf, block->name, block->lf.max);
                return;
        }

        /* Check if this is for an RVU PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK) {
                is_pf = false;
                devnum = rvu_get_hwvf(rvu, pcifunc);
        } else {
                is_pf = true;
                devnum = rvu_get_pf(pcifunc);
        }

        block->fn_map[lf] = attach ? pcifunc : 0;

        switch (block->type) {
        case BLKTYPE_NPA:
                pfvf->npalf = attach ? true : false;
                num_lfs = pfvf->npalf;
                break;
        case BLKTYPE_NIX:
                pfvf->nixlf = attach ? true : false;
                num_lfs = pfvf->nixlf;
                break;
        case BLKTYPE_SSO:
                attach ? pfvf->sso++ : pfvf->sso--;
                num_lfs = pfvf->sso;
                break;
        case BLKTYPE_SSOW:
                attach ? pfvf->ssow++ : pfvf->ssow--;
                num_lfs = pfvf->ssow;
                break;
        case BLKTYPE_TIM:
                attach ? pfvf->timlfs++ : pfvf->timlfs--;
                num_lfs = pfvf->timlfs;
                break;
        case BLKTYPE_CPT:
                attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
                num_lfs = pfvf->cptlfs;
                break;
        }

        reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
        rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}

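/* Note on 'pcifunc' encoding (see rvu.h for the authoritative masks):
 * the PF number sits at RVU_PFVF_PF_SHIFT under RVU_PFVF_PF_MASK, and
 * the function bits under RVU_PFVF_FUNC_MASK hold 0 for the PF itself
 * or 'VF index + 1' for its VFs, which is why rvu_get_hwvf() below
 * subtracts one and why the mbox handler ORs in '(devid << SHIFT) + 1'.
 */
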
inline int rvu_get_pf(u16 pcifunc)
{
        return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
        u64 cfg;

        /* Get numVFs attached to this PF and first HWVF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        *numvfs = (cfg >> 12) & 0xFF;
        *hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
        int pf, func;
        u64 cfg;

        pf = rvu_get_pf(pcifunc);
        func = pcifunc & RVU_PFVF_FUNC_MASK;

        /* Get first HWVF attached to this PF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

        return ((cfg & 0xFFF) + func - 1);
}

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
        /* Check if it is a PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK)
                return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
        else
                return &rvu->pf[rvu_get_pf(pcifunc)];
}

static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
        int pf, vf, nvfs;
        u64 cfg;

        pf = rvu_get_pf(pcifunc);
        if (pf >= rvu->hw->total_pfs)
                return false;

        if (!(pcifunc & RVU_PFVF_FUNC_MASK))
                return true;

        /* Check if VF is within number of VFs attached to this PF */
        vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        nvfs = (cfg >> 12) & 0xFF;
        if (vf >= nvfs)
                return false;

        return true;
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
        struct rvu_block *block;

        if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
                return false;

        block = &hw->block[blkaddr];
        return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid;
        u64 cfg;

        /* For each block check if 'implemented' bit is set */
        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
                if (cfg & BIT_ULL(11))
                        block->implemented = true;
        }
}

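/* Note: block discovery above keys off bit 11 of each per-block
 * RVU_PF_BLOCK_ADDRX_DISC register, which indicates the block address
 * is implemented on this silicon; every later is_block_implemented()
 * check uses the flag cached here rather than re-reading hardware.
 */
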
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
        int err;

        if (!block->implemented)
                return 0;

        rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
        err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
                           true);
        return err;
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
        struct rvu_block *block = &rvu->hw->block[blkaddr];

        if (!block->implemented)
                return;

        rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
        rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
        /* Do a HW reset of all RVU blocks */
        rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}

static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
        struct rvu_pfvf *pfvf;
        u64 cfg;
        int lf;

        for (lf = 0; lf < block->lf.max; lf++) {
                cfg = rvu_read64(rvu, block->addr,
                                 block->lfcfg_reg | (lf << block->lfshift));
                if (!(cfg & BIT_ULL(63)))
                        continue;

                /* Set this resource as being used */
                __set_bit(lf, block->lf.bmap);

                /* Get the PF/VF to which this LF is attached */
                pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    (cfg >> 8) & 0xFFFF, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}

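/* Note: as rvu_scan_block() above and rvu_attach_block() below show,
 * each per-LF config register encodes 'valid' in bit 63, the owning
 * pcifunc in bits <23:8> and the slot number in the low bits; attach
 * writes (1ULL << 63) | (pcifunc << 8) | slot and scan reads it back.
 */
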
static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
        int min_vecs;

        if (!vf)
                goto check_pf;

        if (!nvecs) {
                dev_warn(rvu->dev,
                         "PF%d:VF%d is configured with zero msix vectors, %d\n",
                         pf, vf - 1, nvecs);
        }
        return;

check_pf:
        if (pf == 0)
                min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
        else
                min_vecs = RVU_PF_INT_VEC_CNT;

        if (!(nvecs < min_vecs))
                return;
        dev_warn(rvu->dev,
                 "PF%d is configured with too few vectors, %d, min is %d\n",
                 pf, nvecs, min_vecs);
}

static int rvu_setup_msix_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int pf, vf, numvfs, hwvf, err;
        int nvecs, offset, max_msix;
        struct rvu_pfvf *pfvf;
        u64 cfg, phy_addr;
        dma_addr_t iova;

        for (pf = 0; pf < hw->total_pfs; pf++) {
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
                /* If PF is not enabled, nothing to do */
                if (!((cfg >> 20) & 0x01))
                        continue;

                rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

                pfvf = &rvu->pf[pf];
                /* Get num of MSIX vectors attached to this PF */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
                pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
                rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

                /* Alloc msix bitmap for this PF */
                err = rvu_alloc_bitmap(&pfvf->msix);
                if (err)
                        return err;

                /* Allocate memory for MSIX vector to RVU block LF mapping */
                pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
                                                sizeof(u16), GFP_KERNEL);
                if (!pfvf->msix_lfmap)
                        return -ENOMEM;

                /* For PF0 (AF), firmware will set MSIX vector offsets for
                 * AF, block AF and PF0_INT vectors, so jump to VFs.
                 */
                if (!pf)
                        goto setup_vfmsix;

                /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
                 * These are allocated on driver init and never freed,
                 * so no need to set 'msix_lfmap' for these.
                 */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
                nvecs = (cfg >> 12) & 0xFF;
                cfg &= ~0x7FFULL;
                offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                rvu_write64(rvu, BLKADDR_RVUM,
                            RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
                /* Alloc msix bitmap for VFs */
                for (vf = 0; vf < numvfs; vf++) {
                        pfvf = &rvu->hwvf[hwvf + vf];
                        /* Get num of MSIX vectors attached to this VF */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_PFX_MSIX_CFG(pf));
                        pfvf->msix.max = (cfg & 0xFFF) + 1;
                        rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

                        /* Alloc msix bitmap for this VF */
                        err = rvu_alloc_bitmap(&pfvf->msix);
                        if (err)
                                return err;

                        pfvf->msix_lfmap =
                                devm_kcalloc(rvu->dev, pfvf->msix.max,
                                             sizeof(u16), GFP_KERNEL);
                        if (!pfvf->msix_lfmap)
                                return -ENOMEM;

                        /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
                         * These are allocated on driver init and never freed,
                         * so no need to set 'msix_lfmap' for these.
                         */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
                        nvecs = (cfg >> 12) & 0xFF;
                        cfg &= ~0x7FFULL;
                        offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                        rvu_write64(rvu, BLKADDR_RVUM,
                                    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
                                    cfg | offset);
                }
        }

        /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
         * create an IOMMU mapping for the physical address configured by
         * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
         */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        max_msix = cfg & 0xFFFFF;
        phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
        iova = dma_map_resource(rvu->dev, phy_addr,
                                max_msix * PCI_MSIX_ENTRY_SIZE,
                                DMA_BIDIRECTIONAL, 0);

        if (dma_mapping_error(rvu->dev, iova))
                return -ENOMEM;

        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
        rvu->msix_base_iova = iova;

        return 0;
}

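/* Note: the dma_map_resource() mapping created above is torn down by
 * the matching dma_unmap_resource() call in rvu_free_hw_resources();
 * both sides size the region from the same RVU_PRIV_CONST max-MSIX
 * count, so the map and unmap lengths agree.
 */
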
static void rvu_free_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        struct rvu_pfvf  *pfvf;
        int id, max_msix;
        u64 cfg;

        rvu_npa_freemem(rvu);
        rvu_npc_freemem(rvu);
        rvu_nix_freemem(rvu);

        /* Free block LF bitmaps */
        for (id = 0; id < BLK_COUNT; id++) {
                block = &hw->block[id];
                kfree(block->lf.bmap);
        }

        /* Free MSIX bitmaps */
        for (id = 0; id < hw->total_pfs; id++) {
                pfvf = &rvu->pf[id];
                kfree(pfvf->msix.bmap);
        }

        for (id = 0; id < hw->total_vfs; id++) {
                pfvf = &rvu->hwvf[id];
                kfree(pfvf->msix.bmap);
        }

        /* Unmap MSIX vector base IOVA mapping */
        if (!rvu->msix_base_iova)
                return;
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        max_msix = cfg & 0xFFFFF;
        dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
                           max_msix * PCI_MSIX_ENTRY_SIZE,
                           DMA_BIDIRECTIONAL, 0);

        mutex_destroy(&rvu->rsrc_lock);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid, err;
        u64 cfg;

        /* Get HW supported max RVU PF & VF count */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        hw->total_pfs = (cfg >> 32) & 0xFF;
        hw->total_vfs = (cfg >> 20) & 0xFFF;
        hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

        /* Init NPA LF's bitmap */
        block = &hw->block[BLKADDR_NPA];
        if (!block->implemented)
                goto nix;
        cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
        block->lf.max = (cfg >> 16) & 0xFFF;
        block->addr = BLKADDR_NPA;
        block->type = BLKTYPE_NPA;
        block->lfshift = 8;
        block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
        block->lfcfg_reg = NPA_PRIV_LFX_CFG;
        block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
        block->lfreset_reg = NPA_AF_LF_RST;
        sprintf(block->name, "NPA");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

nix:
        /* Init NIX LF's bitmap */
        block = &hw->block[BLKADDR_NIX0];
        if (!block->implemented)
                goto sso;
        cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
        block->lf.max = cfg & 0xFFF;
        block->addr = BLKADDR_NIX0;
        block->type = BLKTYPE_NIX;
        block->lfshift = 8;
        block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
        block->lfcfg_reg = NIX_PRIV_LFX_CFG;
        block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
        block->lfreset_reg = NIX_AF_LF_RST;
        sprintf(block->name, "NIX");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

sso:
        /* Init SSO group's bitmap */
        block = &hw->block[BLKADDR_SSO];
        if (!block->implemented)
                goto ssow;
        cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
        block->lf.max = cfg & 0xFFFF;
        block->addr = BLKADDR_SSO;
        block->type = BLKTYPE_SSO;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
        block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
        block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
        block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
        sprintf(block->name, "SSO GROUP");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

ssow:
        /* Init SSO workslot's bitmap */
        block = &hw->block[BLKADDR_SSOW];
        if (!block->implemented)
                goto tim;
        block->lf.max = (cfg >> 56) & 0xFF;
        block->addr = BLKADDR_SSOW;
        block->type = BLKTYPE_SSOW;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
        block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
        block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
        block->lfreset_reg = SSOW_AF_LF_HWS_RST;
        sprintf(block->name, "SSOWS");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

tim:
        /* Init TIM LF's bitmap */
        block = &hw->block[BLKADDR_TIM];
        if (!block->implemented)
                goto cpt;
        cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
        block->lf.max = cfg & 0xFFFF;
        block->addr = BLKADDR_TIM;
        block->type = BLKTYPE_TIM;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
        block->lfcfg_reg = TIM_PRIV_LFX_CFG;
        block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
        block->lfreset_reg = TIM_AF_LF_RST;
        sprintf(block->name, "TIM");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

cpt:
        /* Init CPT LF's bitmap */
        block = &hw->block[BLKADDR_CPT0];
        if (!block->implemented)
                goto init;
        cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
        block->lf.max = cfg & 0xFF;
        block->addr = BLKADDR_CPT0;
        block->type = BLKTYPE_CPT;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
        block->lfcfg_reg = CPT_PRIV_LFX_CFG;
        block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
        block->lfreset_reg = CPT_AF_LF_RST;
        sprintf(block->name, "CPT");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

init:
        /* Allocate memory for PFVF data */
        rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
                               sizeof(struct rvu_pfvf), GFP_KERNEL);
        if (!rvu->pf)
                return -ENOMEM;

        rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
                                 sizeof(struct rvu_pfvf), GFP_KERNEL);
        if (!rvu->hwvf)
                return -ENOMEM;

        mutex_init(&rvu->rsrc_lock);

        err = rvu_setup_msix_resources(rvu);
        if (err)
                return err;

        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
                        continue;

                /* Allocate memory for block LF/slot to pcifunc mapping info */
                block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
                                             sizeof(u16), GFP_KERNEL);
                if (!block->fn_map)
                        return -ENOMEM;

                /* Scan all blocks to check if low level firmware has
                 * already provisioned any of the resources to a PF/VF.
                 */
                rvu_scan_block(rvu, block);
        }

        err = rvu_npc_init(rvu);
        if (err)
                goto exit;

        err = rvu_cgx_init(rvu);
        if (err)
                goto exit;

        err = rvu_npa_init(rvu);
        if (err)
                goto cgx_err;

        err = rvu_nix_init(rvu);
        if (err)
                goto cgx_err;

        return 0;

cgx_err:
        rvu_cgx_exit(rvu);
exit:
        return err;
}

/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
        if (!aq)
                return;

        qmem_free(rvu->dev, aq->inst);
        qmem_free(rvu->dev, aq->res);
        devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
                 int qsize, int inst_size, int res_size)
{
        struct admin_queue *aq;
        int err;

        *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
        if (!*ad_queue)
                return -ENOMEM;
        aq = *ad_queue;

        /* Alloc memory for instructions, i.e. the AQ */
        err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
        if (err) {
                devm_kfree(rvu->dev, aq);
                return err;
        }

        /* Alloc memory for results */
        err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
        if (err) {
                rvu_aq_free(rvu, aq);
                return err;
        }

        spin_lock_init(&aq->lock);
        return 0;
}

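/* Illustrative caller (a sketch, not taken from this file): a block's
 * init code sizes the queue from its AF constants and then does
 * something along the lines of
 *
 *      err = rvu_aq_alloc(rvu, &block->aq, qsize,
 *                         sizeof(struct npa_aq_inst_s),
 *                         sizeof(struct npa_aq_res_s));
 *
 * with the instruction and result entry sizes taken from the block's
 * AQ descriptor formats, as the NPA/NIX init paths do.
 */
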
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
                           struct ready_msg_rsp *rsp)
{
        return 0;
}

/* Get current count of an RVU block's LFs/slots
 * provisioned to a given RVU func.
 */
static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
{
        switch (blktype) {
        case BLKTYPE_NPA:
                return pfvf->npalf ? 1 : 0;
        case BLKTYPE_NIX:
                return pfvf->nixlf ? 1 : 0;
        case BLKTYPE_SSO:
                return pfvf->sso;
        case BLKTYPE_SSOW:
                return pfvf->ssow;
        case BLKTYPE_TIM:
                return pfvf->timlfs;
        case BLKTYPE_CPT:
                return pfvf->cptlfs;
        }
        return 0;
}

bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
        struct rvu_pfvf *pfvf;

        if (!is_pf_func_valid(rvu, pcifunc))
                return false;

        pfvf = rvu_get_pfvf(rvu, pcifunc);

        /* Check if this PFFUNC has an LF of type blktype attached */
        if (!rvu_get_rsrc_mapcount(pfvf, blktype))
                return false;

        return true;
}

static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
                           int pcifunc, int slot)
{
        u64 val;

        val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
        rvu_write64(rvu, block->addr, block->lookup_reg, val);
        /* Wait for the lookup to finish */
        /* TODO: put some timeout here */
        while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
                ;

        val = rvu_read64(rvu, block->addr, block->lookup_reg);

        /* Check LF valid bit */
        if (!(val & (1ULL << 12)))
                return -1;

        return (val & 0xFFF);
}

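/* Note on the LF_CFG_DEBUG lookup used above: the request packs the
 * pcifunc at bit 24, the slot at bit 16 and a 'go' bit at 13; on
 * completion the hardware reports validity in bit 12 and the LF number
 * in the low 12 bits. Field placement described here is simply as this
 * driver uses it.
 */
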
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int slot, lf, num_lfs;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
        if (blkaddr < 0)
                return;

        block = &hw->block[blkaddr];

        num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
        if (!num_lfs)
                return;

        for (slot = 0; slot < num_lfs; slot++) {
                lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
                if (lf < 0) /* This should never happen */
                        continue;

                /* Disable the LF */
                rvu_write64(rvu, blkaddr, block->lfcfg_reg |
                            (lf << block->lfshift), 0x00ULL);

                /* Update SW maintained mapping info as well */
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    pcifunc, lf, false);

                /* Free the resource */
                rvu_free_rsrc(&block->lf, lf);

                /* Clear MSIX vector offset for this LF */
                rvu_clear_msix_offset(rvu, pfvf, block, lf);
        }
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
                            u16 pcifunc)
{
        struct rvu_hwinfo *hw = rvu->hw;
        bool detach_all = true;
        struct rvu_block *block;
        int blkid;

        mutex_lock(&rvu->rsrc_lock);

        /* Check for partial resource detach */
        if (detach && detach->partial)
                detach_all = false;

        /* Check for RVU block's LFs attached to this func,
         * if so, detach them.
         */
        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
                        continue;
                if (!detach_all && detach) {
                        if (blkid == BLKADDR_NPA && !detach->npalf)
                                continue;
                        else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
                                continue;
                        else if ((blkid == BLKADDR_SSO) && !detach->sso)
                                continue;
                        else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
                                continue;
                        else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
                                continue;
                        else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
                                continue;
                }
                rvu_detach_block(rvu, pcifunc, block->type);
        }

        mutex_unlock(&rvu->rsrc_lock);
        return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
                                      struct rsrc_detach *detach,
                                      struct msg_rsp *rsp)
{
        return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}

static void rvu_attach_block(struct rvu *rvu, int pcifunc,
                             int blktype, int num_lfs)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int slot, lf;
        int blkaddr;
        u64 cfg;

        if (!num_lfs)
                return;

        blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
        if (blkaddr < 0)
                return;

        block = &hw->block[blkaddr];
        if (!block->lf.bmap)
                return;

        for (slot = 0; slot < num_lfs; slot++) {
                /* Allocate the resource */
                lf = rvu_alloc_rsrc(&block->lf);
                if (lf < 0)
                        return;

                cfg = (1ULL << 63) | (pcifunc << 8) | slot;
                rvu_write64(rvu, blkaddr, block->lfcfg_reg |
                            (lf << block->lfshift), cfg);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    pcifunc, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}

static int rvu_check_rsrc_availability(struct rvu *rvu,
                                       struct rsrc_attach *req, u16 pcifunc)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int free_lfs, mappedlfs;

        /* Only one NPA LF can be attached */
        if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
                block = &hw->block[BLKADDR_NPA];
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (!free_lfs)
                        goto fail;
        } else if (req->npalf) {
                dev_err(&rvu->pdev->dev,
                        "Func 0x%x: Invalid req, already has NPA\n",
                         pcifunc);
                return -EINVAL;
        }

        /* Only one NIX LF can be attached */
        if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
                block = &hw->block[BLKADDR_NIX0];
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (!free_lfs)
                        goto fail;
        } else if (req->nixlf) {
                dev_err(&rvu->pdev->dev,
                        "Func 0x%x: Invalid req, already has NIX\n",
                        pcifunc);
                return -EINVAL;
        }

        if (req->sso) {
                block = &hw->block[BLKADDR_SSO];
                /* Is request within limits? */
                if (req->sso > block->lf.max) {
                        dev_err(&rvu->pdev->dev,
                                "Func 0x%x: Invalid SSO req, %d > max %d\n",
                                 pcifunc, req->sso, block->lf.max);
                        return -EINVAL;
                }
                mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
                free_lfs = rvu_rsrc_free_count(&block->lf);
                /* Check if additional resources are available */
                if (req->sso > mappedlfs &&
                    ((req->sso - mappedlfs) > free_lfs))
                        goto fail;
        }

        if (req->ssow) {
                block = &hw->block[BLKADDR_SSOW];
                if (req->ssow > block->lf.max) {
                        dev_err(&rvu->pdev->dev,
                                "Func 0x%x: Invalid SSOW req, %d > max %d\n",
                                 pcifunc, req->ssow, block->lf.max);
                        return -EINVAL;
                }
                mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (req->ssow > mappedlfs &&
                    ((req->ssow - mappedlfs) > free_lfs))
                        goto fail;
        }

        if (req->timlfs) {
                block = &hw->block[BLKADDR_TIM];
                if (req->timlfs > block->lf.max) {
                        dev_err(&rvu->pdev->dev,
                                "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
                                 pcifunc, req->timlfs, block->lf.max);
                        return -EINVAL;
                }
                mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (req->timlfs > mappedlfs &&
                    ((req->timlfs - mappedlfs) > free_lfs))
                        goto fail;
        }

        if (req->cptlfs) {
                block = &hw->block[BLKADDR_CPT0];
                if (req->cptlfs > block->lf.max) {
                        dev_err(&rvu->pdev->dev,
                                "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
                                 pcifunc, req->cptlfs, block->lf.max);
                        return -EINVAL;
                }
                mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (req->cptlfs > mappedlfs &&
                    ((req->cptlfs - mappedlfs) > free_lfs))
                        goto fail;
        }

        return 0;

fail:
        dev_info(rvu->dev, "Request for %s failed\n", block->name);
        return -ENOSPC;
}

int rvu_mbox_handler_attach_resources(struct rvu *rvu,
                                      struct rsrc_attach *attach,
                                      struct msg_rsp *rsp)
{
        u16 pcifunc = attach->hdr.pcifunc;
        int err;

        /* If first request, detach all existing attached resources */
        if (!attach->modify)
                rvu_detach_rsrcs(rvu, NULL, pcifunc);

        mutex_lock(&rvu->rsrc_lock);

        /* Check if the request can be accommodated */
        err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
        if (err)
                goto exit;

        /* Now attach the requested resources */
        if (attach->npalf)
                rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);

        if (attach->nixlf)
                rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);

        if (attach->sso) {
                /* An RVU func doesn't know which exact LFs or slots are
                 * attached to it; it always sees them as slots 0, 1, 2
                 * and so on. So for a 'modify' request, simply detach
                 * all existing attached LFs/slots and attach afresh.
                 */
                if (attach->modify)
                        rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
                rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
        }

        if (attach->ssow) {
                if (attach->modify)
                        rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
                rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
        }

        if (attach->timlfs) {
                if (attach->modify)
                        rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
                rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
        }

        if (attach->cptlfs) {
                if (attach->modify)
                        rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
                rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
        }

exit:
        mutex_unlock(&rvu->rsrc_lock);
        return err;
}

static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                               int blkaddr, int lf)
{
        u16 vec;

        if (lf < 0)
                return MSIX_VECTOR_INVALID;

        for (vec = 0; vec < pfvf->msix.max; vec++) {
                if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
                        return vec;
        }
        return MSIX_VECTOR_INVALID;
}

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf)
{
        u16 nvecs, vec, offset;
        u64 cfg;

        cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
                         (lf << block->lfshift));
        nvecs = (cfg >> 12) & 0xFF;

        /* Check and alloc MSIX vectors, must be contiguous */
        if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
                return;

        offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

        /* Config MSIX offset in LF */
        rvu_write64(rvu, block->addr, block->msixcfg_reg |
                    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

        /* Update the bitmap as well */
        for (vec = 0; vec < nvecs; vec++)
                pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

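/* Note: as the read-modify-writes above show, an LF's MSIX config
 * register keeps the vector offset in its low 11 bits (hence the
 * ~0x7FFULL mask) and the vector count at bits <19:12>; set/clear only
 * touch the offset field and leave the count intact.
 */
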
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf)
{
        u16 nvecs, vec, offset;
        u64 cfg;

        cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
                         (lf << block->lfshift));
        nvecs = (cfg >> 12) & 0xFF;

        /* Clear MSIX offset in LF */
        rvu_write64(rvu, block->addr, block->msixcfg_reg |
                    (lf << block->lfshift), cfg & ~0x7FFULL);

        offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

        /* Update the mapping */
        for (vec = 0; vec < nvecs; vec++)
                pfvf->msix_lfmap[offset + vec] = 0;

        /* Free the same in MSIX bitmap */
        rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}

int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
                                 struct msix_offset_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_pfvf *pfvf;
        int lf, slot;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        if (!pfvf->msix.bmap)
                return 0;

        /* Set MSIX offsets for each block's LFs attached to this PF/VF */
        lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
        rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

        lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
        rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);

        rsp->sso = pfvf->sso;
        for (slot = 0; slot < rsp->sso; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
                rsp->sso_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
        }

        rsp->ssow = pfvf->ssow;
        for (slot = 0; slot < rsp->ssow; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
                rsp->ssow_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
        }

        rsp->timlfs = pfvf->timlfs;
        for (slot = 0; slot < rsp->timlfs; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
                rsp->timlf_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
        }

        rsp->cptlfs = pfvf->cptlfs;
        for (slot = 0; slot < rsp->cptlfs; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
                rsp->cptlf_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
        }
        return 0;
}

int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
                            struct msg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        u16 vf, numvfs;
        u64 cfg;

        vf = pcifunc & RVU_PFVF_FUNC_MASK;
        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                         RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
        numvfs = (cfg >> 12) & 0xFF;

        if (vf && vf <= numvfs)
                __rvu_flr_handler(rvu, pcifunc);
        else
                return RVU_INVALID_VF_ID;

        return 0;
}

int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
                                struct get_hw_cap_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;

        rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
        rsp->nix_shaping = hw->cap.nix_shaping;

        return 0;
}

static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
                                struct mbox_msghdr *req)
{
        struct rvu *rvu = pci_get_drvdata(mbox->pdev);

        /* Check if valid, if not reply with an invalid msg */
        if (req->sig != OTX2_MBOX_REQ_SIG)
                goto bad_message;

        switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
        case _id: {                                                     \
                struct _rsp_type *rsp;                                  \
                int err;                                                \
                                                                        \
                rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(          \
                        mbox, devid,                                    \
                        sizeof(struct _rsp_type));                      \
                /* some handlers should complete even if reply */       \
                /* could not be allocated */                            \
                if (!rsp &&                                             \
                    _id != MBOX_MSG_DETACH_RESOURCES &&                 \
                    _id != MBOX_MSG_NIX_TXSCH_FREE &&                   \
                    _id != MBOX_MSG_VF_FLR)                             \
                        return -ENOMEM;                                 \
                if (rsp) {                                              \
                        rsp->hdr.id = _id;                              \
                        rsp->hdr.sig = OTX2_MBOX_RSP_SIG;               \
                        rsp->hdr.pcifunc = req->pcifunc;                \
                        rsp->hdr.rc = 0;                                \
                }                                                       \
                                                                        \
                err = rvu_mbox_handler_ ## _fn_name(rvu,                \
                                                    (struct _req_type *)req, \
                                                    rsp);               \
                if (rsp && err)                                         \
                        rsp->hdr.rc = err;                              \
                                                                        \
                return rsp ? err : -ENOMEM;                             \
        }
MBOX_MESSAGES
#undef M

bad_message:
        default:
                otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
                return -ENODEV;
        }
}

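/* Note: expanding MBOX_MESSAGES through the M() macro above emits one
 * 'case' per mailbox message ID, each dispatching to its
 * rvu_mbox_handler_*() function. For example, for the ready message the
 * expansion is roughly (illustrative, abbreviated):
 *
 *      case MBOX_MSG_READY: {
 *              struct ready_msg_rsp *rsp = ...alloc and init hdr...;
 *              err = rvu_mbox_handler_ready(rvu,
 *                                           (struct msg_req *)req, rsp);
 *              ...
 *      }
 */
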
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
        struct rvu *rvu = mwork->rvu;
        int offset, err, id, devid;
        struct otx2_mbox_dev *mdev;
        struct mbox_hdr *req_hdr;
        struct mbox_msghdr *msg;
        struct mbox_wq_info *mw;
        struct otx2_mbox *mbox;

        switch (type) {
        case TYPE_AFPF:
                mw = &rvu->afpf_wq_info;
                break;
        case TYPE_AFVF:
                mw = &rvu->afvf_wq_info;
                break;
        default:
                return;
        }

        devid = mwork - mw->mbox_wrk;
        mbox = &mw->mbox;
        mdev = &mbox->dev[devid];

        /* Process received mbox messages */
        req_hdr = mdev->mbase + mbox->rx_start;
        if (mw->mbox_wrk[devid].num_msgs == 0)
                return;

        offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

        for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
                msg = mdev->mbase + offset;

                /* Set which PF/VF sent this message based on mbox IRQ */
                switch (type) {
                case TYPE_AFPF:
                        msg->pcifunc &=
                                ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
                        msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
                        break;
                case TYPE_AFVF:
                        msg->pcifunc &=
                                ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
                        msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
                        break;
                }

                err = rvu_process_mbox_msg(mbox, devid, msg);
                if (!err) {
                        offset = mbox->rx_start + msg->next_msgoff;
                        continue;
                }

                if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
                        dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
                                 err, otx2_mbox_id2name(msg->id),
                                 msg->id, rvu_get_pf(msg->pcifunc),
                                 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
                else
                        dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
                                 err, otx2_mbox_id2name(msg->id),
                                 msg->id, devid);
        }
        mw->mbox_wrk[devid].num_msgs = 0;

        /* Send mbox responses to VF/PF */
        otx2_mbox_msg_send(mbox, devid);
}

static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
        struct rvu_work *mwork = container_of(work, struct rvu_work, work);

        __rvu_mbox_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
        struct rvu_work *mwork = container_of(work, struct rvu_work, work);

        __rvu_mbox_handler(mwork, TYPE_AFVF);
}

static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
        struct rvu *rvu = mwork->rvu;
        struct otx2_mbox_dev *mdev;
        struct mbox_hdr *rsp_hdr;
        struct mbox_msghdr *msg;
        struct mbox_wq_info *mw;
        struct otx2_mbox *mbox;
        int offset, id, devid;

        switch (type) {
        case TYPE_AFPF:
                mw = &rvu->afpf_wq_info;
                break;
        case TYPE_AFVF:
                mw = &rvu->afvf_wq_info;
                break;
        default:
                return;
        }

        devid = mwork - mw->mbox_wrk_up;
        mbox = &mw->mbox_up;
        mdev = &mbox->dev[devid];

        rsp_hdr = mdev->mbase + mbox->rx_start;
        if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
                dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
                return;
        }

        offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

        for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
                msg = mdev->mbase + offset;

                if (msg->id >= MBOX_MSG_MAX) {
                        dev_err(rvu->dev,
                                "Mbox msg with unknown ID 0x%x\n", msg->id);
                        goto end;
                }

                if (msg->sig != OTX2_MBOX_RSP_SIG) {
                        dev_err(rvu->dev,
                                "Mbox msg with wrong signature %x, ID 0x%x\n",
                                msg->sig, msg->id);
                        goto end;
                }

                switch (msg->id) {
                case MBOX_MSG_CGX_LINK_EVENT:
                        break;
                default:
                        if (msg->rc)
                                dev_err(rvu->dev,
                                        "Mbox msg response has err %d, ID 0x%x\n",
                                        msg->rc, msg->id);
1587                         break;
1588                 }
1589 end:
1590                 offset = mbox->rx_start + msg->next_msgoff;
1591                 mdev->msgs_acked++;
1592         }
1593         mw->mbox_wrk_up[devid].up_num_msgs = 0;
1594
1595         otx2_mbox_reset(mbox, devid);
1596 }
1597
1598 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
1599 {
1600         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1601
1602         __rvu_mbox_up_handler(mwork, TYPE_AFPF);
1603 }
1604
1605 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
1606 {
1607         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1608
1609         __rvu_mbox_up_handler(mwork, TYPE_AFVF);
1610 }
1611
1612 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
1613                          int type, int num,
1614                          void (mbox_handler)(struct work_struct *),
1615                          void (mbox_up_handler)(struct work_struct *))
1616 {
1617         void __iomem *hwbase = NULL, *reg_base;
1618         int err, i, dir, dir_up;
1619         struct rvu_work *mwork;
1620         const char *name;
1621         u64 bar4_addr;
1622
1623         switch (type) {
1624         case TYPE_AFPF:
1625                 name = "rvu_afpf_mailbox";
1626                 bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
1627                 dir = MBOX_DIR_AFPF;
1628                 dir_up = MBOX_DIR_AFPF_UP;
1629                 reg_base = rvu->afreg_base;
1630                 break;
1631         case TYPE_AFVF:
1632                 name = "rvu_afvf_mailbox";
1633                 bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
1634                 dir = MBOX_DIR_PFVF;
1635                 dir_up = MBOX_DIR_PFVF_UP;
1636                 reg_base = rvu->pfreg_base;
1637                 break;
1638         default:
1639                 return -EINVAL;
1640         }
1641
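        /* A single workqueue services all 'num' mailboxes; max_active is
         * set to 'num' so each PF/VF mailbox can be worked on concurrently.
         */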
1642         mw->mbox_wq = alloc_workqueue(name,
1643                                       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
1644                                       num);
1645         if (!mw->mbox_wq)
1646                 return -ENOMEM;
1647
1648         mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
1649                                     sizeof(struct rvu_work), GFP_KERNEL);
1650         if (!mw->mbox_wrk) {
1651                 err = -ENOMEM;
1652                 goto exit;
1653         }
1654
1655         mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
1656                                        sizeof(struct rvu_work), GFP_KERNEL);
1657         if (!mw->mbox_wrk_up) {
1658                 err = -ENOMEM;
1659                 goto exit;
1660         }
1661
1662         /* Mailbox is a reserved memory (in RAM) region shared between
1663          * RVU devices; it shouldn't be mapped as device memory, so that
1664          * unaligned accesses are allowed.
1665          */
1666         hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
1667         if (!hwbase) {
1668                 dev_err(rvu->dev, "Unable to map mailbox region\n");
1669                 err = -ENOMEM;
1670                 goto exit;
1671         }
1672
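        /* The regular and the up-call mailboxes share this mapping;
         * otx2_mbox_init() assigns each device an MBOX_SIZE slice of
         * hwbase.
         */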
1673         err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
1674         if (err)
1675                 goto exit;
1676
1677         err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
1678                              reg_base, dir_up, num);
1679         if (err)
1680                 goto exit;
1681
1682         for (i = 0; i < num; i++) {
1683                 mwork = &mw->mbox_wrk[i];
1684                 mwork->rvu = rvu;
1685                 INIT_WORK(&mwork->work, mbox_handler);
1686
1687                 mwork = &mw->mbox_wrk_up[i];
1688                 mwork->rvu = rvu;
1689                 INIT_WORK(&mwork->work, mbox_up_handler);
1690         }
1691
1692         return 0;
1693 exit:
1694         if (hwbase)
1695                 iounmap((void __iomem *)hwbase);
1696         destroy_workqueue(mw->mbox_wq);
1697         return err;
1698 }
1699
1700 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
1701 {
1702         if (mw->mbox_wq) {
1703                 flush_workqueue(mw->mbox_wq);
1704                 destroy_workqueue(mw->mbox_wq);
1705                 mw->mbox_wq = NULL;
1706         }
1707
1708         if (mw->mbox.hwbase)
1709                 iounmap((void __iomem *)mw->mbox.hwbase);
1710
1711         otx2_mbox_destroy(&mw->mbox);
1712         otx2_mbox_destroy(&mw->mbox_up);
1713 }
1714
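/* Queue mailbox work for every device whose bit is set in 'intr'.
 * 'first' is the device index corresponding to bit 0 of 'intr', so
 * bit (i - first) belongs to device i.
 */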
1715 static void rvu_queue_work(struct mbox_wq_info *mw, int first,
1716                            int mdevs, u64 intr)
1717 {
1718         struct otx2_mbox_dev *mdev;
1719         struct otx2_mbox *mbox;
1720         struct mbox_hdr *hdr;
1721         int i;
1722
1723         for (i = first; i < mdevs; i++) {
1724                 /* Bit 0 of 'intr' corresponds to device index 'first' */
1725                 if (!(intr & BIT_ULL(i - first)))
1726                         continue;
1727
1728                 mbox = &mw->mbox;
1729                 mdev = &mbox->dev[i];
1730                 hdr = mdev->mbase + mbox->rx_start;
1731
1732                 /* The hdr->num_msgs is set to zero immediately in the
1733                  * interrupt handler to ensure that it holds a correct
1734                  * value the next time the interrupt handler is called.
1735                  * pf->mbox.num_msgs holds the data for use in
1736                  * pfaf_mbox_handler and pf->mbox.up_num_msgs holds the
1737                  * data for use in pfaf_mbox_up_handler.
1738                  */
1739
1740                 if (hdr->num_msgs) {
1741                         mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
1742                         hdr->num_msgs = 0;
1743                         queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
1744                 }
1745                 mbox = &mw->mbox_up;
1746                 mdev = &mbox->dev[i];
1747                 hdr = mdev->mbase + mbox->rx_start;
1748                 if (hdr->num_msgs) {
1749                         mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
1750                         hdr->num_msgs = 0;
1751                         queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
1752                 }
1753         }
1754 }
1755
1756 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
1757 {
1758         struct rvu *rvu = (struct rvu *)rvu_irq;
1759         int vfs = rvu->vfs;
1760         u64 intr;
1761
1762         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
1763         /* Clear interrupts */
1764         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
1765
1766         /* Sync with mbox memory region */
1767         rmb();
1768
1769         rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
1770
1771         /* Handle VF interrupts: VFs 64-127 report via VFPF_MBOX_INTX(1) */
1772         if (vfs > 64) {
1773                 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
1774                 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
1775
1776                 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
1777                 vfs -= 64;
1778         }
1779
1780         intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
1781         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
1782
1783         rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
1784
1785         return IRQ_HANDLED;
1786 }
1787
1788 static void rvu_enable_mbox_intr(struct rvu *rvu)
1789 {
1790         struct rvu_hwinfo *hw = rvu->hw;
1791
1792         /* Clear spurious irqs, if any */
1793         rvu_write64(rvu, BLKADDR_RVUM,
1794                     RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
1795
1796         /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
1797         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
1798                     INTR_MASK(hw->total_pfs) & ~1ULL);
1799 }
1800
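/* Tear down and reset every LF of the given block that is currently
 * attached to 'pcifunc'.
 */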
1801 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
1802 {
1803         struct rvu_block *block;
1804         int slot, lf, num_lfs;
1805         int err;
1806
1807         block = &rvu->hw->block[blkaddr];
1808         num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
1809                                         block->type);
1810         if (!num_lfs)
1811                 return;
1812         for (slot = 0; slot < num_lfs; slot++) {
1813                 lf = rvu_get_lf(rvu, block, pcifunc, slot);
1814                 if (lf < 0)
1815                         continue;
1816
1817                 /* Cleanup LF and reset it */
1818                 if (block->addr == BLKADDR_NIX0)
1819                         rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
1820                 else if (block->addr == BLKADDR_NPA)
1821                         rvu_npa_lf_teardown(rvu, pcifunc, lf);
1822
1823                 err = rvu_lf_reset(rvu, block, lf);
1824                 if (err) {
1825                         dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
1826                                 block->addr, lf);
1827                 }
1828         }
1829 }
1830
1831 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
1832 {
1833         mutex_lock(&rvu->flr_lock);
1834         /* Reset order should reflect inter-block dependencies:
1835          * 1. Reset any packet/work sources (NIX, CPT, TIM)
1836          * 2. Flush and reset SSO/SSOW
1837          * 3. Cleanup pools (NPA)
1838          */
1839         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
1840         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
1841         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
1842         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
1843         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
1844         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
1845         rvu_detach_rsrcs(rvu, NULL, pcifunc);
1846         mutex_unlock(&rvu->flr_lock);
1847 }
1848
1849 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
1850 {
1851         int reg = 0;
1852
1853         /* pcifunc = 0 (PF0) | (vf + 1) */
1854         __rvu_flr_handler(rvu, vf + 1);
1855
1856         if (vf >= 64) {
1857                 reg = 1;
1858                 vf = vf - 64;
1859         }
1860
1861         /* Signal FLR finish and enable IRQ */
1862         rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
1863         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
1864 }
1865
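/* FLR work handler for both PFs and AF's VFs: flr_wrk[] holds one entry
 * per PF followed by one entry per AF VF, so indices at or beyond
 * total_pfs denote AF VFs.
 */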
1866 static void rvu_flr_handler(struct work_struct *work)
1867 {
1868         struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
1869         struct rvu *rvu = flrwork->rvu;
1870         u16 pcifunc, numvfs, vf;
1871         u64 cfg;
1872         int pf;
1873
1874         pf = flrwork - rvu->flr_wrk;
1875         if (pf >= rvu->hw->total_pfs) {
1876                 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
1877                 return;
1878         }
1879
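        /* Bits [19:12] of RVU_PRIV_PFX_CFG hold the number of VFs
         * attached to this PF.
         */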
1880         cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1881         numvfs = (cfg >> 12) & 0xFF;
1882         pcifunc  = pf << RVU_PFVF_PF_SHIFT;
1883
1884         for (vf = 0; vf < numvfs; vf++)
1885                 __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
1886
1887         __rvu_flr_handler(rvu, pcifunc);
1888
1889         /* Signal FLR finish */
1890         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
1891
1892         /* Enable interrupt */
1893         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
1894 }
1895
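/* Queue FLR work for AF's own VFs; their work structs follow the
 * per-PF entries in flr_wrk[], hence the total_pfs offset below.
 */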
1896 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
1897 {
1898         int dev, vf, reg = 0;
1899         u64 intr;
1900
1901         if (start_vf >= 64)
1902                 reg = 1;
1903
1904         intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
1905         if (!intr)
1906                 return;
1907
1908         for (vf = 0; vf < numvfs; vf++) {
1909                 if (!(intr & BIT_ULL(vf)))
1910                         continue;
1911                 dev = vf + start_vf + rvu->hw->total_pfs;
1912                 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
1913                 /* Clear and disable the interrupt */
1914                 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
1915                 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
1916         }
1917 }
1918
1919 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
1920 {
1921         struct rvu *rvu = (struct rvu *)rvu_irq;
1922         u64 intr;
1923         u8  pf;
1924
1925         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
1926         if (!intr)
1927                 goto afvf_flr;
1928
1929         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
1930                 if (intr & (1ULL << pf)) {
1931                         /* PF is already dead, do only AF-related operations */
1932                         queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
1933                         /* clear interrupt */
1934                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
1935                                     BIT_ULL(pf));
1936                         /* Disable the interrupt */
1937                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
1938                                     BIT_ULL(pf));
1939                 }
1940         }
1941
1942 afvf_flr:
1943         rvu_afvf_queue_flr_work(rvu, 0, 64);
1944         if (rvu->vfs > 64)
1945                 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
1946
1947         return IRQ_HANDLED;
1948 }
1949
1950 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
1951 {
1952         int vf;
1953
1954         /* Nothing to be done here other than clearing the
1955          * TRPEND bit.
1956          */
1957         for (vf = 0; vf < 64; vf++) {
1958                 if (intr & (1ULL << vf)) {
1959                         /* Clear TRPEND due to ME (master enable) */
1960                         rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
1961                         /* clear interrupt */
1962                         rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
1963                 }
1964         }
1965 }
1966
1967 /* Handles ME interrupts from VFs of AF */
1968 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
1969 {
1970         struct rvu *rvu = (struct rvu *)rvu_irq;
1971         int vfset;
1972         u64 intr;
1973
1976         for (vfset = 0; vfset <= 1; vfset++) {
1977                 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
1978                 if (intr)
1979                         rvu_me_handle_vfset(rvu, vfset, intr);
1980         }
1981
1982         return IRQ_HANDLED;
1983 }
1984
1985 /* Handles ME interrupts from PFs */
1986 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
1987 {
1988         struct rvu *rvu = (struct rvu *)rvu_irq;
1989         u64 intr;
1990         u8  pf;
1991
1992         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
1993
1994         /* Nothing to be done here other than clearing the
1995          * TRPEND bit.
1996          */
1997         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
1998                 if (intr & (1ULL << pf)) {
1999                         /* Clear TRPEND due to ME (master enable) */
2000                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2001                                     BIT_ULL(pf));
2002                         /* clear interrupt */
2003                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2004                                     BIT_ULL(pf));
2005                 }
2006         }
2007
2008         return IRQ_HANDLED;
2009 }
2010
2011 static void rvu_unregister_interrupts(struct rvu *rvu)
2012 {
2013         int irq;
2014
2015         /* Disable the Mbox interrupt */
2016         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2017                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2018
2019         /* Disable the PF FLR interrupt */
2020         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2021                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2022
2023         /* Disable the PF ME interrupt */
2024         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2025                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2026
2027         for (irq = 0; irq < rvu->num_vec; irq++) {
2028                 if (rvu->irq_allocated[irq])
2029                         free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2030         }
2031
2032         pci_free_irq_vectors(rvu->pdev);
2033         rvu->num_vec = 0;
2034 }
2035
2036 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2037 {
2038         struct rvu_pfvf *pfvf = &rvu->pf[0];
2039         int offset;
2040
2042         offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2043
2044         /* Make sure there are enough MSIX vectors configured so that
2045          * VF interrupts can be handled. An offset of zero means that
2046          * PF vectors are not configured and overlap the AF vectors.
2047          */
2048         return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2049                offset;
2050 }
2051
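/* Request the AF's own MSIX vectors (mailbox, PF FLR, PF ME) and, if
 * PF0's vectors are configured, the vectors servicing AF's VFs
 * (VF mailbox, VF FLR, VF ME).
 */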
2052 static int rvu_register_interrupts(struct rvu *rvu)
2053 {
2054         int ret, offset, pf_vec_start;
2055
2056         rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2057
2058         rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2059                                            NAME_SIZE, GFP_KERNEL);
2060         if (!rvu->irq_name)
2061                 return -ENOMEM;
2062
2063         rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2064                                           sizeof(bool), GFP_KERNEL);
2065         if (!rvu->irq_allocated)
2066                 return -ENOMEM;
2067
2068         /* Enable MSI-X */
2069         ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2070                                     rvu->num_vec, PCI_IRQ_MSIX);
2071         if (ret < 0) {
2072                 dev_err(rvu->dev,
2073                         "RVUAF: Request for %d msix vectors failed, ret %d\n",
2074                         rvu->num_vec, ret);
2075                 return ret;
2076         }
2077
2078         /* Register mailbox interrupt handler */
2079         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2080         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2081                           rvu_mbox_intr_handler, 0,
2082                           &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2083         if (ret) {
2084                 dev_err(rvu->dev,
2085                         "RVUAF: IRQ registration failed for mbox irq\n");
2086                 goto fail;
2087         }
2088
2089         rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2090
2091         /* Enable mailbox interrupts from all PFs */
2092         rvu_enable_mbox_intr(rvu);
2093
2094         /* Register FLR interrupt handler */
2095         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2096                 "RVUAF FLR");
2097         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2098                           rvu_flr_intr_handler, 0,
2099                           &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2100                           rvu);
2101         if (ret) {
2102                 dev_err(rvu->dev,
2103                         "RVUAF: IRQ registration failed for FLR\n");
2104                 goto fail;
2105         }
2106         rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2107
2108         /* Enable FLR interrupt for all PFs */
2109         rvu_write64(rvu, BLKADDR_RVUM,
2110                     RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2111
2112         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2113                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2114
2115         /* Register ME interrupt handler */
2116         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2117                 "RVUAF ME");
2118         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2119                           rvu_me_pf_intr_handler, 0,
2120                           &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2121                           rvu);
2122         if (ret) {
2123                 dev_err(rvu->dev,
2124                         "RVUAF: IRQ registration failed for ME\n");
                        goto fail;
2125         }
2126         rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2127
2128         /* Enable ME interrupt for all PFs */
2129         rvu_write64(rvu, BLKADDR_RVUM,
2130                     RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2131
2132         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2133                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2134
2135         if (!rvu_afvf_msix_vectors_num_ok(rvu))
2136                 return 0;
2137
2138         /* Get PF MSIX vectors offset. */
2139         pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2140                                   RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2141
2142         /* Register MBOX0 interrupt. */
2143         offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2144         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2145         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2146                           rvu_mbox_intr_handler, 0,
2147                           &rvu->irq_name[offset * NAME_SIZE],
2148                           rvu);
2149         if (ret) {
2150                 dev_err(rvu->dev,
2151                         "RVUAF: IRQ registration failed for Mbox0\n");
                        goto fail;
2152         }
2153         rvu->irq_allocated[offset] = true;
2154
2155         /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
2156          * simply increment current offset by 1.
2157          */
2158         offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2159         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2160         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2161                           rvu_mbox_intr_handler, 0,
2162                           &rvu->irq_name[offset * NAME_SIZE],
2163                           rvu);
2164         if (ret) {
2165                 dev_err(rvu->dev,
2166                         "RVUAF: IRQ registration failed for Mbox1\n");
                        goto fail;
2167         }
2168         rvu->irq_allocated[offset] = true;
2169
2170         /* Register FLR interrupt handler for AF's VFs */
2171         offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2172         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2173         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2174                           rvu_flr_intr_handler, 0,
2175                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2176         if (ret) {
2177                 dev_err(rvu->dev,
2178                         "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2179                 goto fail;
2180         }
2181         rvu->irq_allocated[offset] = true;
2182
2183         offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
2184         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
2185         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2186                           rvu_flr_intr_handler, 0,
2187                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2188         if (ret) {
2189                 dev_err(rvu->dev,
2190                         "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
2191                 goto fail;
2192         }
2193         rvu->irq_allocated[offset] = true;
2194
2195         /* Register ME interrupt handler for AF's VFs */
2196         offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
2197         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
2198         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2199                           rvu_me_vf_intr_handler, 0,
2200                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2201         if (ret) {
2202                 dev_err(rvu->dev,
2203                         "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
2204                 goto fail;
2205         }
2206         rvu->irq_allocated[offset] = true;
2207
2208         offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
2209         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
2210         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2211                           rvu_me_vf_intr_handler, 0,
2212                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2213         if (ret) {
2214                 dev_err(rvu->dev,
2215                         "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
2216                 goto fail;
2217         }
2218         rvu->irq_allocated[offset] = true;
2219         return 0;
2220
2221 fail:
2222         rvu_unregister_interrupts(rvu);
2223         return ret;
2224 }
2225
2226 static void rvu_flr_wq_destroy(struct rvu *rvu)
2227 {
2228         if (rvu->flr_wq) {
2229                 flush_workqueue(rvu->flr_wq);
2230                 destroy_workqueue(rvu->flr_wq);
2231                 rvu->flr_wq = NULL;
2232         }
2233 }
2234
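/* Enable FLR reporting for every PF and allocate one FLR work item per
 * PF plus one per potential AF VF.
 */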
2235 static int rvu_flr_init(struct rvu *rvu)
2236 {
2237         int dev, num_devs;
2238         u64 cfg;
2239         int pf;
2240
2241         /* Enable FLR for all PFs */
2242         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2243                 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2244                 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
2245                             cfg | BIT_ULL(22));
2246         }
2247
2248         rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
2249                                       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2250                                       1);
2251         if (!rvu->flr_wq)
2252                 return -ENOMEM;
2253
2254         num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
2255         rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
2256                                     sizeof(struct rvu_work), GFP_KERNEL);
2257         if (!rvu->flr_wrk) {
2258                 destroy_workqueue(rvu->flr_wq);
2259                 return -ENOMEM;
2260         }
2261
2262         for (dev = 0; dev < num_devs; dev++) {
2263                 rvu->flr_wrk[dev].rvu = rvu;
2264                 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
2265         }
2266
2267         mutex_init(&rvu->flr_lock);
2268
2269         return 0;
2270 }
2271
2272 static void rvu_disable_afvf_intr(struct rvu *rvu)
2273 {
2274         int vfs = rvu->vfs;
2275
2276         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
2277         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
2278         rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
2279         if (vfs <= 64)
2280                 return;
2281
2282         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
2283                       INTR_MASK(vfs - 64));
2284         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2285         rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2286 }
2287
2288 static void rvu_enable_afvf_intr(struct rvu *rvu)
2289 {
2290         int vfs = rvu->vfs;
2291
2292         /* Clear any pending interrupts and enable AF VF interrupts for
2293          * the first 64 VFs.
2294          */
2295         /* Mbox */
2296         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
2297         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
2298
2299         /* FLR */
2300         rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
2301         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
2302         rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
2303
2304         /* Same for remaining VFs, if any. */
2305         if (vfs <= 64)
2306                 return;
2307
2308         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
2309         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
2310                       INTR_MASK(vfs - 64));
2311
2312         rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
2313         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2314         rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2315 }
2316
2317 #define PCI_DEVID_OCTEONTX2_LBK 0xA061
2318
2319 static int lbk_get_num_chans(void)
2320 {
2321         struct pci_dev *pdev;
2322         void __iomem *base;
2323         int ret = -EIO;
2324
2325         pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
2326                               NULL);
2327         if (!pdev)
2328                 goto err;
2329
2330         base = pci_ioremap_bar(pdev, 0);
2331         if (!base)
2332                 goto err_put;
2333
2334         /* Read number of available LBK channels from LBK(0)_CONST register. */
2335         ret = (readq(base + 0x10) >> 32) & 0xffff;
2336         iounmap(base);
2337 err_put:
2338         pci_dev_put(pdev);
2339 err:
2340         return ret;
2341 }
2342
2343 static int rvu_enable_sriov(struct rvu *rvu)
2344 {
2345         struct pci_dev *pdev = rvu->pdev;
2346         int err, chans, vfs;
2347
2348         if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
2349                 dev_warn(&pdev->dev,
2350                          "Skipping SRIOV enablement since not enough IRQs are available\n");
2351                 return 0;
2352         }
2353
2354         chans = lbk_get_num_chans();
2355         if (chans < 0)
2356                 return chans;
2357
2358         vfs = pci_sriov_get_totalvfs(pdev);
2359
2360         /* Limit VFs in case we have more VFs than LBK channels available. */
2361         if (vfs > chans)
2362                 vfs = chans;
2363
2364         /* AF's VFs work in pairs and talk over consecutive loopback channels.
2365          * Thus we want to enable the maximum even number of VFs. If an odd
2366          * number of VFs is available, the last VF on the list remains
2367          * disabled.
2368          */
2369         if (vfs & 0x1) {
2370                 dev_warn(&pdev->dev,
2371                          "Number of VFs should be even. Enabling %d out of %d.\n",
2372                          vfs - 1, vfs);
2373                 vfs--;
2374         }
2375
2376         if (!vfs)
2377                 return 0;
2378
2379         /* Save the number of VFs for use in the VF interrupt handlers.
2380          * Since interrupts might start arriving during SRIOV enablement,
2381          * the ordinary API cannot be used to get the number of enabled VFs.
2382          */
2383         rvu->vfs = vfs;
2384
2385         err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
2386                             rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
2387         if (err)
2388                 return err;
2389
2390         rvu_enable_afvf_intr(rvu);
2391         /* Make sure IRQs are enabled before SRIOV. */
2392         mb();
2393
2394         err = pci_enable_sriov(pdev, vfs);
2395         if (err) {
2396                 rvu_disable_afvf_intr(rvu);
2397                 rvu_mbox_destroy(&rvu->afvf_wq_info);
2398                 return err;
2399         }
2400
2401         return 0;
2402 }
2403
2404 static void rvu_disable_sriov(struct rvu *rvu)
2405 {
2406         rvu_disable_afvf_intr(rvu);
2407         rvu_mbox_destroy(&rvu->afvf_wq_info);
2408         pci_disable_sriov(rvu->pdev);
2409 }
2410
2411 static void rvu_update_module_params(struct rvu *rvu)
2412 {
2413         const char *default_pfl_name = "default";
2414
2415         strscpy(rvu->mkex_pfl_name,
2416                 mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
2417 }
2418
2419 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2420 {
2421         struct device *dev = &pdev->dev;
2422         struct rvu *rvu;
2423         int    err;
2424
2425         rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
2426         if (!rvu)
2427                 return -ENOMEM;
2428
2429         rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
2430         if (!rvu->hw) {
2431                 devm_kfree(dev, rvu);
2432                 return -ENOMEM;
2433         }
2434
2435         pci_set_drvdata(pdev, rvu);
2436         rvu->pdev = pdev;
2437         rvu->dev = &pdev->dev;
2438
2439         err = pci_enable_device(pdev);
2440         if (err) {
2441                 dev_err(dev, "Failed to enable PCI device\n");
2442                 goto err_freemem;
2443         }
2444
2445         err = pci_request_regions(pdev, DRV_NAME);
2446         if (err) {
2447                 dev_err(dev, "PCI request regions failed 0x%x\n", err);
2448                 goto err_disable_device;
2449         }
2450
2451         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
2452         if (err) {
2453                 dev_err(dev, "Unable to set DMA mask\n");
2454                 goto err_release_regions;
2455         }
2456
2457         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
2458         if (err) {
2459                 dev_err(dev, "Unable to set consistent DMA mask\n");
2460                 goto err_release_regions;
2461         }
2462
2463         /* Map Admin function CSRs */
2464         rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
2465         rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
2466         if (!rvu->afreg_base || !rvu->pfreg_base) {
2467                 dev_err(dev, "Unable to map admin function CSRs, aborting\n");
2468                 err = -ENOMEM;
2469                 goto err_release_regions;
2470         }
2471
2472         /* Store module params in rvu structure */
2473         rvu_update_module_params(rvu);
2474
2475         /* Check which blocks the HW supports */
2476         rvu_check_block_implemented(rvu);
2477
2478         rvu_reset_all_blocks(rvu);
2479
2480         rvu_setup_hw_capabilities(rvu);
2481
2482         err = rvu_setup_hw_resources(rvu);
2483         if (err)
2484                 goto err_release_regions;
2485
2486         /* Init mailbox btw AF and PFs */
2487         err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
2488                             rvu->hw->total_pfs, rvu_afpf_mbox_handler,
2489                             rvu_afpf_mbox_up_handler);
2490         if (err)
2491                 goto err_hwsetup;
2492
2493         err = rvu_flr_init(rvu);
2494         if (err)
2495                 goto err_mbox;
2496
2497         err = rvu_register_interrupts(rvu);
2498         if (err)
2499                 goto err_flr;
2500
2501         /* Enable AF's VFs (if any) */
2502         err = rvu_enable_sriov(rvu);
2503         if (err)
2504                 goto err_irq;
2505
2506         /* Initialize debugfs */
2507         rvu_dbg_init(rvu);
2508
2509         return 0;
2510 err_irq:
2511         rvu_unregister_interrupts(rvu);
2512 err_flr:
2513         rvu_flr_wq_destroy(rvu);
2514 err_mbox:
2515         rvu_mbox_destroy(&rvu->afpf_wq_info);
2516 err_hwsetup:
2517         rvu_cgx_exit(rvu);
2518         rvu_reset_all_blocks(rvu);
2519         rvu_free_hw_resources(rvu);
2520 err_release_regions:
2521         pci_release_regions(pdev);
2522 err_disable_device:
2523         pci_disable_device(pdev);
2524 err_freemem:
2525         pci_set_drvdata(pdev, NULL);
2526         devm_kfree(&pdev->dev, rvu->hw);
2527         devm_kfree(dev, rvu);
2528         return err;
2529 }
2530
2531 static void rvu_remove(struct pci_dev *pdev)
2532 {
2533         struct rvu *rvu = pci_get_drvdata(pdev);
2534
2535         rvu_dbg_exit(rvu);
2536         rvu_unregister_interrupts(rvu);
2537         rvu_flr_wq_destroy(rvu);
2538         rvu_cgx_exit(rvu);
2539         rvu_mbox_destroy(&rvu->afpf_wq_info);
2540         rvu_disable_sriov(rvu);
2541         rvu_reset_all_blocks(rvu);
2542         rvu_free_hw_resources(rvu);
2543
2544         pci_release_regions(pdev);
2545         pci_disable_device(pdev);
2546         pci_set_drvdata(pdev, NULL);
2547
2548         devm_kfree(&pdev->dev, rvu->hw);
2549         devm_kfree(&pdev->dev, rvu);
2550 }
2551
2552 static struct pci_driver rvu_driver = {
2553         .name = DRV_NAME,
2554         .id_table = rvu_id_table,
2555         .probe = rvu_probe,
2556         .remove = rvu_remove,
2557 };
2558
2559 static int __init rvu_init_module(void)
2560 {
2561         int err;
2562
2563         pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
2564
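        /* Register the CGX driver first; the AF's probe expects CGX
         * devices to have been enumerated already.
         */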
2565         err = pci_register_driver(&cgx_driver);
2566         if (err < 0)
2567                 return err;
2568
2569         err = pci_register_driver(&rvu_driver);
2570         if (err < 0)
2571                 pci_unregister_driver(&cgx_driver);
2572
2573         return err;
2574 }
2575
2576 static void __exit rvu_cleanup_module(void)
2577 {
2578         pci_unregister_driver(&rvu_driver);
2579         pci_unregister_driver(&cgx_driver);
2580 }
2581
2582 module_init(rvu_init_module);
2583 module_exit(rvu_cleanup_module);