// SPDX-License-Identifier: GPL-2.0
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
/*
 * One vector for each type of ring
 *  - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
22 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
26 static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
28 struct nitrox_q_vector *qvec = data;
29 union nps_pkt_slc_cnts slc_cnts;
30 struct nitrox_cmdq *cmdq = qvec->cmdq;
32 slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
33 /* New packet on SLC output port */
34 if (slc_cnts.s.slc_int)
35 tasklet_hi_schedule(&qvec->resp_tasklet);
40 static void clear_nps_core_err_intr(struct nitrox_device *ndev)
44 /* Write 1 to clear */
45 value = nitrox_read_csr(ndev, NPS_CORE_INT);
46 nitrox_write_csr(ndev, NPS_CORE_INT, value);
48 dev_err_ratelimited(DEV(ndev), "NSP_CORE_INT 0x%016llx\n", value);
51 static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
53 union nps_pkt_int pkt_int;
54 unsigned long value, offset;
57 pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
58 dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n",
61 if (pkt_int.s.slc_err) {
62 offset = NPS_PKT_SLC_ERR_TYPE;
63 value = nitrox_read_csr(ndev, offset);
64 nitrox_write_csr(ndev, offset, value);
65 dev_err_ratelimited(DEV(ndev),
66 "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value);
68 offset = NPS_PKT_SLC_RERR_LO;
69 value = nitrox_read_csr(ndev, offset);
70 nitrox_write_csr(ndev, offset, value);
71 /* enable the solicit ports */
72 for_each_set_bit(i, &value, BITS_PER_LONG)
73 enable_pkt_solicit_port(ndev, i);
75 dev_err_ratelimited(DEV(ndev),
76 "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value);
78 offset = NPS_PKT_SLC_RERR_HI;
79 value = nitrox_read_csr(ndev, offset);
80 nitrox_write_csr(ndev, offset, value);
81 dev_err_ratelimited(DEV(ndev),
82 "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value);
85 if (pkt_int.s.in_err) {
86 offset = NPS_PKT_IN_ERR_TYPE;
87 value = nitrox_read_csr(ndev, offset);
88 nitrox_write_csr(ndev, offset, value);
89 dev_err_ratelimited(DEV(ndev),
90 "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value);
91 offset = NPS_PKT_IN_RERR_LO;
92 value = nitrox_read_csr(ndev, offset);
93 nitrox_write_csr(ndev, offset, value);
94 /* enable the input ring */
95 for_each_set_bit(i, &value, BITS_PER_LONG)
96 enable_pkt_input_ring(ndev, i);
98 dev_err_ratelimited(DEV(ndev),
99 "NPS_PKT_IN_RERR_LO 0x%016lx\n", value);
101 offset = NPS_PKT_IN_RERR_HI;
102 value = nitrox_read_csr(ndev, offset);
103 nitrox_write_csr(ndev, offset, value);
104 dev_err_ratelimited(DEV(ndev),
105 "NPS_PKT_IN_RERR_HI 0x%016lx\n", value);
109 static void clear_pom_err_intr(struct nitrox_device *ndev)
113 value = nitrox_read_csr(ndev, POM_INT);
114 nitrox_write_csr(ndev, POM_INT, value);
115 dev_err_ratelimited(DEV(ndev), "POM_INT 0x%016llx\n", value);
118 static void clear_pem_err_intr(struct nitrox_device *ndev)
122 value = nitrox_read_csr(ndev, PEM0_INT);
123 nitrox_write_csr(ndev, PEM0_INT, value);
124 dev_err_ratelimited(DEV(ndev), "PEM(0)_INT 0x%016llx\n", value);
127 static void clear_lbc_err_intr(struct nitrox_device *ndev)
129 union lbc_int lbc_int;
133 lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
134 dev_err_ratelimited(DEV(ndev), "LBC_INT 0x%016llx\n", lbc_int.value);
136 if (lbc_int.s.dma_rd_err) {
137 for (i = 0; i < NR_CLUSTERS; i++) {
138 offset = EFL_CORE_VF_ERR_INT0X(i);
139 value = nitrox_read_csr(ndev, offset);
140 nitrox_write_csr(ndev, offset, value);
141 offset = EFL_CORE_VF_ERR_INT1X(i);
142 value = nitrox_read_csr(ndev, offset);
143 nitrox_write_csr(ndev, offset, value);
147 if (lbc_int.s.cam_soft_err) {
148 dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
149 invalidate_lbc(ndev);
152 if (lbc_int.s.pref_dat_len_mismatch_err) {
153 offset = LBC_PLM_VF1_64_INT;
154 value = nitrox_read_csr(ndev, offset);
155 nitrox_write_csr(ndev, offset, value);
156 offset = LBC_PLM_VF65_128_INT;
157 value = nitrox_read_csr(ndev, offset);
158 nitrox_write_csr(ndev, offset, value);
161 if (lbc_int.s.rd_dat_len_mismatch_err) {
162 offset = LBC_ELM_VF1_64_INT;
163 value = nitrox_read_csr(ndev, offset);
164 nitrox_write_csr(ndev, offset, value);
165 offset = LBC_ELM_VF65_128_INT;
166 value = nitrox_read_csr(ndev, offset);
167 nitrox_write_csr(ndev, offset, value);
169 nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
172 static void clear_efl_err_intr(struct nitrox_device *ndev)
176 for (i = 0; i < NR_CLUSTERS; i++) {
177 union efl_core_int core_int;
180 offset = EFL_CORE_INTX(i);
181 core_int.value = nitrox_read_csr(ndev, offset);
182 nitrox_write_csr(ndev, offset, core_int.value);
183 dev_err_ratelimited(DEV(ndev), "ELF_CORE(%d)_INT 0x%016llx\n",
185 if (core_int.s.se_err) {
186 offset = EFL_CORE_SE_ERR_INTX(i);
187 value = nitrox_read_csr(ndev, offset);
188 nitrox_write_csr(ndev, offset, value);
193 static void clear_bmi_err_intr(struct nitrox_device *ndev)
197 value = nitrox_read_csr(ndev, BMI_INT);
198 nitrox_write_csr(ndev, BMI_INT, value);
199 dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
202 static void nps_core_int_tasklet(unsigned long data)
204 struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
205 struct nitrox_device *ndev = qvec->ndev;
207 /* if pf mode do queue recovery */
208 if (ndev->mode == __NDEV_MODE_PF) {
211 * if VF(s) enabled communicate the error information
218 * nps_core_int_isr - interrupt handler for NITROX errors and
219 * mailbox communication
221 static irqreturn_t nps_core_int_isr(int irq, void *data)
223 struct nitrox_device *ndev = data;
224 union nps_core_int_active core_int;
226 core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
228 if (core_int.s.nps_core)
229 clear_nps_core_err_intr(ndev);
231 if (core_int.s.nps_pkt)
232 clear_nps_pkt_err_intr(ndev);
235 clear_pom_err_intr(ndev);
238 clear_pem_err_intr(ndev);
241 clear_lbc_err_intr(ndev);
244 clear_efl_err_intr(ndev);
247 clear_bmi_err_intr(ndev);
249 /* If more work callback the ISR, set resend */
250 core_int.s.resend = 1;
251 nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
256 void nitrox_unregister_interrupts(struct nitrox_device *ndev)
258 struct pci_dev *pdev = ndev->pdev;
261 for (i = 0; i < ndev->num_vecs; i++) {
262 struct nitrox_q_vector *qvec;
265 qvec = ndev->qvec + i;
269 /* get the vector number */
270 vec = pci_irq_vector(pdev, i);
271 irq_set_affinity_hint(vec, NULL);
274 tasklet_disable(&qvec->resp_tasklet);
275 tasklet_kill(&qvec->resp_tasklet);
280 pci_free_irq_vectors(pdev);
283 int nitrox_register_interrupts(struct nitrox_device *ndev)
285 struct pci_dev *pdev = ndev->pdev;
286 struct nitrox_q_vector *qvec;
287 int nr_vecs, vec, cpu;
293 * Entry 0: NPS PKT ring 0
294 * Entry 1: AQMQ ring 0
295 * Entry 2: ZQM ring 0
296 * Entry 3: NPS PKT ring 1
297 * Entry 4: AQMQ ring 1
298 * Entry 5: ZQM ring 1
300 * Entry 192: NPS_CORE_INT_ACTIVE
302 nr_vecs = pci_msix_vec_count(pdev);
305 ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
307 dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
310 ndev->num_vecs = nr_vecs;
312 ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
314 pci_free_irq_vectors(pdev);
318 /* request irqs for packet rings/ports */
319 for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
320 qvec = &ndev->qvec[i];
322 qvec->ring = i / NR_RING_VECTORS;
323 if (qvec->ring >= ndev->nr_queues)
326 qvec->cmdq = &ndev->pkt_inq[qvec->ring];
327 snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
328 /* get the vector number */
329 vec = pci_irq_vector(pdev, i);
330 ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
332 dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
336 cpu = qvec->ring % num_online_cpus();
337 irq_set_affinity_hint(vec, get_cpu_mask(cpu));
339 tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
340 (unsigned long)qvec);
344 /* request irqs for non ring vectors */
345 i = NON_RING_MSIX_BASE;
346 qvec = &ndev->qvec[i];
349 snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
350 /* get the vector number */
351 vec = pci_irq_vector(pdev, i);
352 ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
354 dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
357 cpu = num_online_cpus();
358 irq_set_affinity_hint(vec, get_cpu_mask(cpu));
360 tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
361 (unsigned long)qvec);
367 nitrox_unregister_interrupts(ndev);
371 void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
373 struct pci_dev *pdev = ndev->pdev;
376 for (i = 0; i < ndev->num_vecs; i++) {
377 struct nitrox_q_vector *qvec;
380 qvec = ndev->qvec + i;
384 vec = ndev->iov.msix.vector;
385 irq_set_affinity_hint(vec, NULL);
388 tasklet_disable(&qvec->resp_tasklet);
389 tasklet_kill(&qvec->resp_tasklet);
394 pci_disable_msix(pdev);
397 int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
399 struct pci_dev *pdev = ndev->pdev;
400 struct nitrox_q_vector *qvec;
405 * only non ring vectors i.e Entry 192 is available
406 * for PF in SR-IOV mode.
408 ndev->iov.msix.entry = NON_RING_MSIX_BASE;
409 ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
411 dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
416 qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
418 pci_disable_msix(pdev);
424 ndev->num_vecs = NR_NON_RING_VECTORS;
425 snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
428 vec = ndev->iov.msix.vector;
429 ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
431 dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
435 cpu = num_online_cpus();
436 irq_set_affinity_hint(vec, get_cpu_mask(cpu));
438 tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
439 (unsigned long)qvec);
445 nitrox_sriov_unregister_interrupts(ndev);