crypto: cavium/nitrox - Enable interrupts for PF in SR-IOV mode.

// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"

/*
 * One vector for each type of ring
 *  - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192

/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: argument
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
        struct nitrox_q_vector *qvec = data;
        union nps_pkt_slc_cnts slc_cnts;
        struct nitrox_cmdq *cmdq = qvec->cmdq;

        slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
        /* New packet on SLC output port */
        if (slc_cnts.s.slc_int)
                tasklet_hi_schedule(&qvec->resp_tasklet);

        return IRQ_HANDLED;
}

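/*
 * Editor's illustrative sketch, not part of the driver: the real bottom
 * half paired with nps_pkt_slc_isr() is pkt_slc_resp_tasklet(), defined
 * in the request manager. A minimal tasklet of the same shape, assuming
 * a hypothetical drain_responses() helper, would look like this:
 */
static void __maybe_unused example_resp_tasklet(unsigned long data)
{
        struct nitrox_q_vector *qvec = (struct nitrox_q_vector *)data;

        /* a real bottom half would drain completed commands here, e.g.
         * drain_responses(qvec->cmdq);  (hypothetical helper)
         */
        (void)qvec;     /* silence unused warning in this sketch */
}
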
static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        /* Write 1 to clear */
        value = nitrox_read_csr(ndev, NPS_CORE_INT);
        nitrox_write_csr(ndev, NPS_CORE_INT, value);

        dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT  0x%016llx\n", value);
}

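/*
 * Editor's sketch: every clear_*_err_intr() helper below repeats the same
 * read/write-back sequence for write-1-to-clear (RW1C) registers. A
 * hypothetical helper factoring that pattern out might look like:
 */
static u64 __maybe_unused nitrox_ack_rw1c(struct nitrox_device *ndev,
                                          u64 offset)
{
        u64 value;

        /* reading latches the pending bits; writing them back clears them */
        value = nitrox_read_csr(ndev, offset);
        nitrox_write_csr(ndev, offset, value);
        return value;
}
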
static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
        union nps_pkt_int pkt_int;
        unsigned long value, offset;
        int i;

        pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
        dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
                            pkt_int.value);

        if (pkt_int.s.slc_err) {
                offset = NPS_PKT_SLC_ERR_TYPE;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);

                offset = NPS_PKT_SLC_RERR_LO;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                /* enable the solicit ports */
                for_each_set_bit(i, &value, BITS_PER_LONG)
                        enable_pkt_solicit_port(ndev, i);

                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);

                offset = NPS_PKT_SLC_RERR_HI;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
        }

        if (pkt_int.s.in_err) {
                offset = NPS_PKT_IN_ERR_TYPE;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
                offset = NPS_PKT_IN_RERR_LO;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                /* enable the input ring */
                for_each_set_bit(i, &value, BITS_PER_LONG)
                        enable_pkt_input_ring(ndev, i);

                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);

                offset = NPS_PKT_IN_RERR_HI;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                dev_err_ratelimited(DEV(ndev),
                                    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
        }
}

static void clear_pom_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, POM_INT);
        nitrox_write_csr(ndev, POM_INT, value);
        dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
}

static void clear_pem_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, PEM0_INT);
        nitrox_write_csr(ndev, PEM0_INT, value);
        dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
}

static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
        union lbc_int lbc_int;
        u64 value, offset;
        int i;

        lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
        dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);

        if (lbc_int.s.dma_rd_err) {
                for (i = 0; i < NR_CLUSTERS; i++) {
                        offset = EFL_CORE_VF_ERR_INT0X(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                        offset = EFL_CORE_VF_ERR_INT1X(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                }
        }

        if (lbc_int.s.cam_soft_err) {
                dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
                invalidate_lbc(ndev);
        }

        if (lbc_int.s.pref_dat_len_mismatch_err) {
                offset = LBC_PLM_VF1_64_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                offset = LBC_PLM_VF65_128_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
        }

        if (lbc_int.s.rd_dat_len_mismatch_err) {
                offset = LBC_ELM_VF1_64_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
                offset = LBC_ELM_VF65_128_INT;
                value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, value);
        }
        nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}

static void clear_efl_err_intr(struct nitrox_device *ndev)
{
        int i;

        for (i = 0; i < NR_CLUSTERS; i++) {
                union efl_core_int core_int;
                u64 value, offset;

                offset = EFL_CORE_INTX(i);
                core_int.value = nitrox_read_csr(ndev, offset);
                nitrox_write_csr(ndev, offset, core_int.value);
                dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT  0x%016llx\n",
                                    i, core_int.value);
                if (core_int.s.se_err) {
                        offset = EFL_CORE_SE_ERR_INTX(i);
                        value = nitrox_read_csr(ndev, offset);
                        nitrox_write_csr(ndev, offset, value);
                }
        }
}

static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
        u64 value;

        value = nitrox_read_csr(ndev, BMI_INT);
        nitrox_write_csr(ndev, BMI_INT, value);
        dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
}

static void nps_core_int_tasklet(unsigned long data)
{
        struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
        struct nitrox_device *ndev = qvec->ndev;

        /* if pf mode do queue recovery */
        if (ndev->mode == __NDEV_MODE_PF) {
        } else {
                /*
                 * if VF(s) enabled communicate the error information
                 * to VF(s)
                 */
        }
}

/**
 * nps_core_int_isr - interrupt handler for NITROX errors and
 *   mailbox communication
 * @irq: irq number
 * @data: argument
 */
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
        struct nitrox_device *ndev = data;
        union nps_core_int_active core_int;

        core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

        if (core_int.s.nps_core)
                clear_nps_core_err_intr(ndev);

        if (core_int.s.nps_pkt)
                clear_nps_pkt_err_intr(ndev);

        if (core_int.s.pom)
                clear_pom_err_intr(ndev);

        if (core_int.s.pem)
                clear_pem_err_intr(ndev);

        if (core_int.s.lbc)
                clear_lbc_err_intr(ndev);

        if (core_int.s.efl)
                clear_efl_err_intr(ndev);

        if (core_int.s.bmi)
                clear_bmi_err_intr(ndev);

        /* set resend so that any remaining work re-triggers the ISR */
        core_int.s.resend = 1;
        nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);

        return IRQ_HANDLED;
}

void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        int i;

        for (i = 0; i < ndev->num_vecs; i++) {
                struct nitrox_q_vector *qvec;
                int vec;

                qvec = ndev->qvec + i;
                if (!qvec->valid)
                        continue;

                /* get the vector number */
                vec = pci_irq_vector(pdev, i);
                irq_set_affinity_hint(vec, NULL);
                free_irq(vec, qvec);

                tasklet_disable(&qvec->resp_tasklet);
                tasklet_kill(&qvec->resp_tasklet);
                qvec->valid = false;
        }
        kfree(ndev->qvec);
        ndev->qvec = NULL;
        pci_free_irq_vectors(pdev);
}

int nitrox_register_interrupts(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        struct nitrox_q_vector *qvec;
        int nr_vecs, vec, cpu;
        int ret, i;

        /*
         * PF MSI-X vectors
         *
         * Entry 0: NPS PKT ring 0
         * Entry 1: AQMQ ring 0
         * Entry 2: ZQM ring 0
         * Entry 3: NPS PKT ring 1
         * Entry 4: AQMQ ring 1
         * Entry 5: ZQM ring 1
         * ....
         * Entry 192: NPS_CORE_INT_ACTIVE
         */
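        /*
         * Illustrative note (editor's addition): with the layout above,
         * packet ring 'r' owns entry r * NR_RING_VECTORS + PKT_RING_MSIX_BASE
         * (ring 0 -> entry 0, ring 1 -> entry 3, ..., ring 63 -> entry 189),
         * leaving NON_RING_MSIX_BASE (192) for the error/mailbox vector.
         */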
        nr_vecs = pci_msix_vec_count(pdev);

        /* Enable MSI-X */
        ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
                return ret;
        }
        ndev->num_vecs = nr_vecs;

        ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
        if (!ndev->qvec) {
                pci_free_irq_vectors(pdev);
                return -ENOMEM;
        }

        /* request irqs for packet rings/ports */
        for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
                qvec = &ndev->qvec[i];

                qvec->ring = i / NR_RING_VECTORS;
                if (qvec->ring >= ndev->nr_queues)
                        break;

                qvec->cmdq = &ndev->pkt_inq[qvec->ring];
                snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
                /* get the vector number */
                vec = pci_irq_vector(pdev, i);
                ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
                if (ret) {
                        dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
                                qvec->ring);
                        goto irq_fail;
                }
                cpu = qvec->ring % num_online_cpus();
                irq_set_affinity_hint(vec, get_cpu_mask(cpu));

                tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
                             (unsigned long)qvec);
                qvec->valid = true;
        }

        /* request irqs for non ring vectors */
        i = NON_RING_MSIX_BASE;
        qvec = &ndev->qvec[i];
        qvec->ndev = ndev;

        snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
        /* get the vector number */
        vec = pci_irq_vector(pdev, i);
        ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
        if (ret) {
                dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
                goto irq_fail;
        }
        cpu = num_online_cpus();
        irq_set_affinity_hint(vec, get_cpu_mask(cpu));

        tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
                     (unsigned long)qvec);
        qvec->valid = true;

        return 0;

irq_fail:
        nitrox_unregister_interrupts(ndev);
        return ret;
}

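/*
 * Editor's usage sketch (hypothetical caller, not in the driver): a probe
 * path would register interrupts once the packet queues exist, and the
 * same nitrox_unregister_interrupts() that backs the irq_fail label above
 * also serves as the remove-time teardown.
 */
static int __maybe_unused nitrox_example_setup(struct nitrox_device *ndev)
{
        int err;

        /* queues (ndev->pkt_inq) must be allocated before this point */
        err = nitrox_register_interrupts(ndev);
        if (err)
                return err;

        /* ... remaining device bring-up ... */
        return 0;
}
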
void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        int i;

        for (i = 0; i < ndev->num_vecs; i++) {
                struct nitrox_q_vector *qvec;
                int vec;

                qvec = ndev->qvec + i;
                if (!qvec->valid)
                        continue;

                vec = ndev->iov.msix.vector;
                irq_set_affinity_hint(vec, NULL);
                free_irq(vec, qvec);

                tasklet_disable(&qvec->resp_tasklet);
                tasklet_kill(&qvec->resp_tasklet);
                qvec->valid = false;
        }
        kfree(ndev->qvec);
        ndev->qvec = NULL;
        pci_disable_msix(pdev);
}

int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
{
        struct pci_dev *pdev = ndev->pdev;
        struct nitrox_q_vector *qvec;
        int vec, cpu;
        int ret;

        /*
         * Only the non-ring vector, i.e. entry 192, is available
         * to the PF in SR-IOV mode.
         */
        ndev->iov.msix.entry = NON_RING_MSIX_BASE;
        ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
        if (ret) {
                dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
                        NON_RING_MSIX_BASE);
                return ret;
        }

        qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
        if (!qvec) {
                pci_disable_msix(pdev);
                return -ENOMEM;
        }
        qvec->ndev = ndev;

        ndev->qvec = qvec;
        ndev->num_vecs = NR_NON_RING_VECTORS;
        snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
                 NON_RING_MSIX_BASE);

        vec = ndev->iov.msix.vector;
        ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
        if (ret) {
                dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
                        NON_RING_MSIX_BASE);
                goto iov_irq_fail;
        }
        cpu = num_online_cpus();
        irq_set_affinity_hint(vec, get_cpu_mask(cpu));

        tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
                     (unsigned long)qvec);
        qvec->valid = true;

        return 0;

iov_irq_fail:
        nitrox_sriov_unregister_interrupts(ndev);
        return ret;
}
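
/*
 * Editor's usage sketch (hypothetical, reflecting the commit subject
 * "Enable interrupts for PF in SR-IOV mode"): when VFs take over the
 * rings, the PF drops its per-ring vectors and keeps only entry 192
 * for error reporting and PF<->VF mailbox traffic.
 */
static int __maybe_unused nitrox_example_sriov_switch(struct nitrox_device *ndev)
{
        /* release the per-ring PF vectors first */
        nitrox_unregister_interrupts(ndev);

        /* keep just the non-ring vector (NPS_CORE_INT_ACTIVE) */
        return nitrox_sriov_register_interupts(ndev);
}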