1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
36 #include <linux/bitops.h>
37 #include <linux/delay.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/errno.h>
40 #include <linux/interrupt.h>
41 #include <linux/kernel.h>
42 #include <linux/pci.h>
43 #include <linux/slab.h>
44 #include <linux/string.h>
48 #include "qed_init_ops.h"
51 #include "qed_reg_addr.h"
53 #include "qed_sriov.h"
57 qed_int_comp_cb_t comp_cb;
61 struct qed_sb_sp_info {
62 struct qed_sb_info sb_info;
64 /* per protocol index data */
65 struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
68 enum qed_attention_type {
73 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
74 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
76 struct aeu_invert_reg_bit {
79 #define ATTENTION_PARITY (1 << 0)
81 #define ATTENTION_LENGTH_MASK (0x00000ff0)
82 #define ATTENTION_LENGTH_SHIFT (4)
83 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
84 ATTENTION_LENGTH_SHIFT)
85 #define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT)
86 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
87 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
90 /* Multiple bits start with this offset */
91 #define ATTENTION_OFFSET_MASK (0x000ff000)
92 #define ATTENTION_OFFSET_SHIFT (12)
94 #define ATTENTION_BB_MASK (0x00700000)
95 #define ATTENTION_BB_SHIFT (20)
96 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
97 #define ATTENTION_BB_DIFFERENT BIT(23)
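/* The flags word packs the attention description: bit 0 marks a parity
 * source, bits 11:4 hold the number of consecutive AEU bits the entry
 * covers, and bits 19:12 hold an optional starting offset for multi-bit
 * sources. Bits 22:20 select a BB-specific replacement entry (see
 * aeu_descs_special[]) when ATTENTION_BB_DIFFERENT (bit 23) is set.
 * For example, a 16-bit wide source is declared with
 * (16 << ATTENTION_LENGTH_SHIFT), and ATTENTION_LENGTH(flags) yields 16
 * again when walking the descriptor array.
 */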
101 /* Callback to call if attention will be triggered */
102 int (*cb)(struct qed_hwfn *p_hwfn);
104 enum block_id block_index;
107 struct aeu_invert_reg {
108 struct aeu_invert_reg_bit bits[32];
111 #define MAX_ATTN_GRPS (8)
112 #define NUM_ATTN_REGS (9)
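/* The AEU exposes NUM_ATTN_REGS 32-bit "after invert" registers, and every
 * source may be routed to one of MAX_ATTN_GRPS output groups. The per-group
 * enable registers are laid out as NUM_ATTN_REGS consecutive 32-bit
 * registers, which is why qed_int_deassertion() below steps the enable
 * address by sizeof(u32) * NUM_ATTN_REGS when moving between groups.
 */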
114 /* Specific HW attention callbacks */
115 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
117 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
119 /* This might occur on certain instances; Log it once then mask it */
120 DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
122 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
128 #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
129 #define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
130 #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
131 #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf)
132 #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
133 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1)
134 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
135 #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff)
136 #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
137 #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf)
138 #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
139 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff)
140 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
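/* The _MASK/_SHIFT pairs above follow the driver-wide convention consumed
 * by GET_FIELD()/SET_FIELD(); e.g. (illustrative, assuming the usual
 * GET_FIELD() definition from the common qed headers):
 *
 *	vf_id = GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID);
 *
 * which simply evaluates to (data >> 6) & 0xff.
 */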
141 static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
143 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
144 PSWHST_REG_INCORRECT_ACCESS_VALID);
146 if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
147 u32 addr, data, length;
149 addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
150 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
151 data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
152 PSWHST_REG_INCORRECT_ACCESS_DATA);
153 length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
154 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
156 DP_INFO(p_hwfn->cdev,
157 "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
159 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
160 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
162 ATTENTION_INCORRECT_ACCESS_VF_VALID),
164 ATTENTION_INCORRECT_ACCESS_CLIENT),
165 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
167 ATTENTION_INCORRECT_ACCESS_BYTE_EN),
174 #define QED_GRC_ATTENTION_VALID_BIT (1 << 0)
175 #define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff)
176 #define QED_GRC_ATTENTION_ADDRESS_SHIFT (0)
177 #define QED_GRC_ATTENTION_RDWR_BIT (1 << 23)
178 #define QED_GRC_ATTENTION_MASTER_MASK (0xf)
179 #define QED_GRC_ATTENTION_MASTER_SHIFT (24)
180 #define QED_GRC_ATTENTION_PF_MASK (0xf)
181 #define QED_GRC_ATTENTION_PF_SHIFT (0)
182 #define QED_GRC_ATTENTION_VF_MASK (0xff)
183 #define QED_GRC_ATTENTION_VF_SHIFT (4)
184 #define QED_GRC_ATTENTION_PRIV_MASK (0x3)
185 #define QED_GRC_ATTENTION_PRIV_SHIFT (14)
186 #define QED_GRC_ATTENTION_PRIV_VF (0)
187 static const char *attn_master_to_str(u8 master)
190 case 1: return "PXP";
191 case 2: return "MCP";
192 case 3: return "MSDM";
193 case 4: return "PSDM";
194 case 5: return "YSDM";
195 case 6: return "USDM";
196 case 7: return "TSDM";
197 case 8: return "XSDM";
198 case 9: return "DBU";
199 case 10: return "DMAE";
205 static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
209 /* We've already cleared the timeout interrupt register, so we learn
210 * of interrupts via the validity register
212 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
213 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
214 if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
217 /* Read the GRC timeout information */
218 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
219 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
220 tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
221 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
223 DP_INFO(p_hwfn->cdev,
224 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
226 (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
227 GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
228 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
229 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
230 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
231 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
232 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
235 /* Regardless of anything else, clean the validity bit */
236 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
237 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
241 #define PGLUE_ATTENTION_VALID (1 << 29)
242 #define PGLUE_ATTENTION_RD_VALID (1 << 26)
243 #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf)
244 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
245 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1)
246 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19)
247 #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff)
248 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
249 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1)
250 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21)
251 #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1)
252 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22)
253 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1)
254 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23)
255 #define PGLUE_ATTENTION_ICPL_VALID (1 << 23)
256 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
257 #define PGLUE_ATTENTION_ILT_VALID (1 << 23)
259 int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
260 struct qed_ptt *p_ptt)
264 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
265 if (tmp & PGLUE_ATTENTION_VALID) {
266 u32 addr_lo, addr_hi, details;
268 addr_lo = qed_rd(p_hwfn, p_ptt,
269 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
270 addr_hi = qed_rd(p_hwfn, p_ptt,
271 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
272 details = qed_rd(p_hwfn, p_ptt,
273 PGLUE_B_REG_TX_ERR_WR_DETAILS);
276 "Illegal write by chip to [%08x:%08x] blocked.\n"
277 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
278 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
279 addr_hi, addr_lo, details,
280 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
281 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
283 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
286 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
288 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
290 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
293 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
294 if (tmp & PGLUE_ATTENTION_RD_VALID) {
295 u32 addr_lo, addr_hi, details;
297 addr_lo = qed_rd(p_hwfn, p_ptt,
298 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
299 addr_hi = qed_rd(p_hwfn, p_ptt,
300 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
301 details = qed_rd(p_hwfn, p_ptt,
302 PGLUE_B_REG_TX_ERR_RD_DETAILS);
305 "Illegal read by chip from [%08x:%08x] blocked.\n"
306 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
307 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
308 addr_hi, addr_lo, details,
309 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
310 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
312 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
315 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
317 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
319 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
322 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
323 if (tmp & PGLUE_ATTENTION_ICPL_VALID)
324 DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);
326 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
327 if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
328 u32 addr_hi, addr_lo;
330 addr_lo = qed_rd(p_hwfn, p_ptt,
331 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
332 addr_hi = qed_rd(p_hwfn, p_ptt,
333 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
335 DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
336 tmp, addr_hi, addr_lo);
339 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
340 if (tmp & PGLUE_ATTENTION_ILT_VALID) {
341 u32 addr_hi, addr_lo, details;
343 addr_lo = qed_rd(p_hwfn, p_ptt,
344 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
345 addr_hi = qed_rd(p_hwfn, p_ptt,
346 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
347 details = qed_rd(p_hwfn, p_ptt,
348 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
351 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
352 details, tmp, addr_hi, addr_lo);
355 /* Clear the indications */
356 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));
361 static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
363 return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
366 #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
367 #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
368 #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
369 #define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
370 #define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
372 #define QED_DB_REC_COUNT 1000
373 #define QED_DB_REC_INTERVAL 100
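/* Worst case, the flush loop below polls DORQ_REG_PF_USAGE_CNT up to
 * QED_DB_REC_COUNT times with a QED_DB_REC_INTERVAL microsecond delay each,
 * i.e. roughly 1000 * 100 usec = 100 ms, before reporting that the doorbell
 * usage counter failed to drain.
 */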
375 static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
376 struct qed_ptt *p_ptt)
378 u32 count = QED_DB_REC_COUNT;
381 /* wait for usage to zero or count to run out. This is necessary since
382 * EDPM doorbell transactions can take multiple 64b cycles, and as such
383 * can "split" over the PCI. Possibly, the doorbell drop can happen with
384 * half an EDPM in the queue and the other half dropped. Another EDPM
385 * doorbell to the same address (from the doorbell recovery mechanism or
386 * from the doorbelling entity) could have its first half dropped and the
387 * second half interpreted as a continuation of the first. To prevent such
388 * malformed doorbells from reaching the device, flush the queue before
389 * releasing the overflow sticky indication.
391 while (count-- && usage) {
392 usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
393 udelay(QED_DB_REC_INTERVAL);
396 /* should have been depleted by now */
398 DP_NOTICE(p_hwfn->cdev,
399 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
400 QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
407 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
412 overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
413 DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
415 qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
419 if (qed_edpm_enabled(p_hwfn)) {
420 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
425 /* Flush any pending (e)dpm as they may never arrive */
426 qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
428 /* Release overflow sticky indication (stop silently dropping everything) */
429 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
431 /* Repeat all last doorbells (doorbell drop recovery) */
432 qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
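/* Summary of qed_db_rec_handler() above: after the sticky overflow
 * indication is read, outstanding EDPM doorbells are flushed (when EDPM is
 * enabled), any pending DPM transaction is force-aborted, the sticky
 * indication is released so the DORQ stops silently dropping doorbells, and
 * all registered doorbells are then replayed via
 * qed_db_recovery_execute(DB_REC_REAL_DEAL).
 */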
437 static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
439 u32 int_sts, first_drop_reason, details, address, all_drops_reason;
440 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
443 int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
444 DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
446 /* int_sts may be zero since all PFs were interrupted for doorbell
447 * overflow but another one already handled it. Can abort here. If
448 * this PF also requires overflow recovery, we will be interrupted again.
449 * The masked almost-full indication may also be set; ignore it.
451 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
454 /* check if db_drop or overflow happened */
455 if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
456 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
457 /* Obtain data about db drop/overflow */
458 first_drop_reason = qed_rd(p_hwfn, p_ptt,
459 DORQ_REG_DB_DROP_REASON) &
460 QED_DORQ_ATTENTION_REASON_MASK;
461 details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
462 address = qed_rd(p_hwfn, p_ptt,
463 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
464 all_drops_reason = qed_rd(p_hwfn, p_ptt,
465 DORQ_REG_DB_DROP_DETAILS_REASON);
468 DP_NOTICE(p_hwfn->cdev,
469 "Doorbell drop occurred\n"
470 "Address\t\t0x%08x\t(second BAR address)\n"
471 "FID\t\t0x%04x\t\t(Opaque FID)\n"
472 "Size\t\t0x%04x\t\t(in bytes)\n"
473 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
474 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
476 GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
477 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
478 first_drop_reason, all_drops_reason);
480 rc = qed_db_rec_handler(p_hwfn, p_ptt);
481 qed_periodic_db_rec_start(p_hwfn);
485 /* Clear the doorbell drop details and prepare for next drop */
486 qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
488 /* Mark interrupt as handled (note: even if drop was due to a different
489 * reason than overflow we mark as handled)
494 DORQ_REG_INT_STS_DB_DROP |
495 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
497 /* If there are no indications other than drop indications, success */
498 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
499 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
500 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
504 /* Some other indication was present - non-recoverable */
505 DP_INFO(p_hwfn, "DORQ fatal attention\n");
510 /* Instead of major changes to the data structure, we have some 'special'
511 * identifiers for sources that changed meaning between adapters.
513 enum aeu_invert_reg_special_type {
514 AEU_INVERT_REG_SPECIAL_CNIG_0,
515 AEU_INVERT_REG_SPECIAL_CNIG_1,
516 AEU_INVERT_REG_SPECIAL_CNIG_2,
517 AEU_INVERT_REG_SPECIAL_CNIG_3,
518 AEU_INVERT_REG_SPECIAL_MAX,
521 static struct aeu_invert_reg_bit
522 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
523 {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
524 {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
525 {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
526 {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
529 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
530 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
532 { /* After Invert 1 */
534 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
539 { /* After Invert 2 */
540 {"PGLUE config_space", ATTENTION_SINGLE,
542 {"PGLUE misc_flr", ATTENTION_SINGLE,
544 {"PGLUE B RBC", ATTENTION_PAR_INT,
545 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
546 {"PGLUE misc_mctp", ATTENTION_SINGLE,
548 {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
549 {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
550 {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
551 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
552 (1 << ATTENTION_OFFSET_SHIFT),
554 {"PCIE glue/PXP VPD %d",
555 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
560 { /* After Invert 3 */
561 {"General Attention %d",
562 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
567 { /* After Invert 4 */
568 {"General Attention 32", ATTENTION_SINGLE,
570 {"General Attention %d",
571 (2 << ATTENTION_LENGTH_SHIFT) |
572 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
573 {"General Attention 35", ATTENTION_SINGLE,
576 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
577 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
580 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
581 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
584 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
585 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
588 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
589 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
591 {"MCP CPU", ATTENTION_SINGLE,
592 qed_mcp_attn_cb, MAX_BLOCK_ID},
593 {"MCP Watchdog timer", ATTENTION_SINGLE,
595 {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
596 {"AVS stop status ready", ATTENTION_SINGLE,
598 {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
599 {"MSTAT per-path", ATTENTION_PAR_INT,
601 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
603 {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
604 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
605 {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
606 {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
607 {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
612 { /* After Invert 5 */
613 {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
614 {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
615 {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
616 {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
617 {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
618 {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
619 {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
620 {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
621 {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
622 {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
623 {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
624 {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
625 {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
626 {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
627 {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
628 {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
633 { /* After Invert 6 */
634 {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
635 {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
636 {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
637 {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
638 {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
639 {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
640 {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
641 {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
642 {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
643 {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
644 {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
645 {"MULD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
646 {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
647 {"DORQ", ATTENTION_PAR_INT,
648 qed_dorq_attn_cb, BLOCK_DORQ},
649 {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
650 {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
655 { /* After Invert 7 */
656 {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
657 {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
658 {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
659 {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
660 {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
661 {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
662 {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
663 {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
664 {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
665 {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
666 {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
667 {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
668 {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
669 {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
670 {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
671 {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
672 {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
677 { /* After Invert 8 */
678 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
680 {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
681 {"PSWWR (pci_clk)", ATTENTION_PAR_INT,
683 {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
684 {"PSWRD (pci_clk)", ATTENTION_PAR_INT,
686 {"PSWHST", ATTENTION_PAR_INT,
687 qed_pswhst_attn_cb, BLOCK_PSWHST},
688 {"PSWHST (pci_clk)", ATTENTION_PAR_INT,
689 NULL, BLOCK_PSWHST2},
690 {"GRC", ATTENTION_PAR_INT,
691 qed_grc_attn_cb, BLOCK_GRC},
692 {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
693 {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
694 {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
695 {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
696 {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
697 {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
698 {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
699 {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
700 {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
701 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
703 {"PERST_B assertion", ATTENTION_SINGLE,
705 {"PERST_B deassertion", ATTENTION_SINGLE,
707 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
713 { /* After Invert 9 */
714 {"MCP Latched memory", ATTENTION_PAR,
716 {"MCP Latched scratchpad cache", ATTENTION_SINGLE,
718 {"MCP Latched ump_tx", ATTENTION_PAR,
720 {"MCP Latched scratchpad", ATTENTION_PAR,
722 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
728 static struct aeu_invert_reg_bit *
729 qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
730 struct aeu_invert_reg_bit *p_bit)
732 if (!QED_IS_BB(p_hwfn->cdev))
735 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
738 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
742 static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
743 struct aeu_invert_reg_bit *p_bit)
745 return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
749 #define ATTN_STATE_BITS (0xfff)
750 #define ATTN_BITS_MASKABLE (0x3ff)
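/* The low 12 attention lines (ATTN_STATE_BITS) carry the asserted/
 * deasserted state tracked in qed_sb_attn_info below; only the low 10 of
 * them (ATTN_BITS_MASKABLE) are masked/unmasked via
 * IGU_REG_ATTENTION_ENABLE. Bit 8 (0x100) is the MFW/MCP indication, which
 * receives special handling in qed_int_assertion() and
 * qed_int_attentions().
 */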
751 struct qed_sb_attn_info {
752 /* Virtual & Physical address of the SB */
753 struct atten_status_block *sb_attn;
756 /* Last seen running index */
759 /* A mask of the AEU bits resulting in a parity error */
760 u32 parity_mask[NUM_ATTN_REGS];
762 /* A pointer to the attention description structure */
763 struct aeu_invert_reg *p_aeu_desc;
765 /* Previously asserted attentions which have not yet been deasserted */
768 /* Cleanup address for the link's general hw attention */
772 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
773 struct qed_sb_attn_info *p_sb_desc)
777 index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
778 if (p_sb_desc->index != index) {
779 p_sb_desc->index = index;
787 * @brief qed_int_assertion - handles asserted attention bits
790 * @param asserted_bits newly asserted bits
793 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
795 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
798 /* Mask the source of the attention in the IGU */
799 igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
800 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
801 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
802 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
803 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
805 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
806 "inner known ATTN state: 0x%04x --> 0x%04x\n",
807 sb_attn_sw->known_attn,
808 sb_attn_sw->known_attn | asserted_bits);
809 sb_attn_sw->known_attn |= asserted_bits;
811 /* Handle MCP events */
812 if (asserted_bits & 0x100) {
813 qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
814 /* Clean the MCP attention */
815 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
816 sb_attn_sw->mfw_attn_addr, 0);
819 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
820 GTT_BAR0_MAP_REG_IGU_CMD +
821 ((IGU_CMD_ATTN_BIT_SET_UPPER -
822 IGU_CMD_INT_ACK_BASE) << 3),
825 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
831 static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
833 enum dbg_attn_type type, bool b_clear)
835 struct dbg_attn_block_result attn_results;
836 enum dbg_status status;
838 memset(&attn_results, 0, sizeof(attn_results));
840 status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
841 b_clear, &attn_results);
842 if (status != DBG_STATUS_OK)
844 "Failed to parse attention information [status: %s]\n",
845 qed_dbg_get_status_str(status));
847 qed_dbg_parse_attn(p_hwfn, &attn_results);
851 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
852 * cause of the attention
855 * @param p_aeu - descriptor of an AEU bit which caused the attention
856 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
857 * this bit to this group.
858 * @param bit_index - index of this bit in the aeu_en_reg
863 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
864 struct aeu_invert_reg_bit *p_aeu,
866 const char *p_bit_name, u32 bitmask)
868 bool b_fatal = false;
872 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
873 p_bit_name, bitmask);
875 /* Call callback before clearing the interrupt status */
877 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
879 rc = p_aeu->cb(p_hwfn);
885 /* Print HW block interrupt registers */
886 if (p_aeu->block_index != MAX_BLOCK_ID)
887 qed_int_attn_print(p_hwfn, p_aeu->block_index,
888 ATTN_TYPE_INTERRUPT, !b_fatal);
891 /* If the attention is benign, no need to prevent it */
895 /* Prevent this Attention from being asserted in the future */
896 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
897 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
898 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
906 * @brief qed_int_deassertion_parity - handle a single parity AEU source
909 * @param p_aeu - descriptor of an AEU bit which caused the parity
910 * @param aeu_en_reg - address of the AEU enable register
913 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
914 struct aeu_invert_reg_bit *p_aeu,
915 u32 aeu_en_reg, u8 bit_index)
917 u32 block_id = p_aeu->block_index, mask, val;
919 DP_NOTICE(p_hwfn->cdev,
920 "%s parity attention is set [address 0x%08x, bit %d]\n",
921 p_aeu->bit_name, aeu_en_reg, bit_index);
923 if (block_id != MAX_BLOCK_ID) {
924 qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
926 /* In BB, there's a single parity bit for several blocks */
927 if (block_id == BLOCK_BTB) {
928 qed_int_attn_print(p_hwfn, BLOCK_OPTE,
929 ATTN_TYPE_PARITY, false);
930 qed_int_attn_print(p_hwfn, BLOCK_MCP,
931 ATTN_TYPE_PARITY, false);
935 /* Prevent this parity error from being re-asserted */
936 mask = ~BIT(bit_index);
937 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
938 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
939 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
944 * @brief - handles deassertion of previously asserted attentions.
947 * @param deasserted_bits - newly deasserted bits
951 static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
954 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
955 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
959 /* Read the attention registers in the AEU */
960 for (i = 0; i < NUM_ATTN_REGS; i++) {
961 aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
962 MISC_REG_AEU_AFTER_INVERT_1_IGU +
964 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
965 "Deasserted bits [%d]: %08x\n",
969 /* Find parity attentions first */
970 for (i = 0; i < NUM_ATTN_REGS; i++) {
971 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
974 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
975 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
977 /* Skip register in which no parity bit is currently set */
978 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
982 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
983 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
985 if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
986 !!(parities & BIT(bit_idx)))
987 qed_int_deassertion_parity(p_hwfn, p_bit,
990 bit_idx += ATTENTION_LENGTH(p_bit->flags);
994 /* Find non-parity cause for attention and act */
995 for (k = 0; k < MAX_ATTN_GRPS; k++) {
996 struct aeu_invert_reg_bit *p_aeu;
998 /* Handle only groups whose attention is currently deasserted */
999 if (!(deasserted_bits & (1 << k)))
1002 for (i = 0; i < NUM_ATTN_REGS; i++) {
1005 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1007 k * sizeof(u32) * NUM_ATTN_REGS;
1009 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1010 bits = aeu_inv_arr[i] & en;
1012 /* Skip if no bit from this group is currently set */
1016 /* Find all set bits from current register which belong
1017 * to current group, making them responsible for the
1018 * previous assertion.
1020 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1021 long unsigned int bitmask;
1024 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1025 p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);
1028 bit_len = ATTENTION_LENGTH(p_aeu->flags);
1029 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
1035 bitmask = bits & (((1 << bit_len) - 1) << bit);
1039 u32 flags = p_aeu->flags;
1043 num = (u8)find_first_bit(&bitmask,
1046 /* Some bits represent more than
1047 * a single interrupt. Correctly print
1050 if (ATTENTION_LENGTH(flags) > 2 ||
1051 ((flags & ATTENTION_PAR_INT) &&
1052 ATTENTION_LENGTH(flags) > 1))
1053 snprintf(bit_name, 30,
1054 p_aeu->bit_name, num);
1057 p_aeu->bit_name, 30);
1059 /* We now need to pass bitmask in its
1064 /* Handle source of the attention */
1065 qed_int_deassertion_aeu_bit(p_hwfn,
1072 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1077 /* Clear IGU indication for the deasserted bits */
1078 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
1079 GTT_BAR0_MAP_REG_IGU_CMD +
1080 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1081 IGU_CMD_INT_ACK_BASE) << 3),
1082 ~((u32)deasserted_bits));
1084 /* Unmask deasserted attentions in IGU */
1085 aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
1086 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1087 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1089 /* Clear deassertion from inner state */
1090 sb_attn_sw->known_attn &= ~deasserted_bits;
1095 static int qed_int_attentions(struct qed_hwfn *p_hwfn)
1097 struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1098 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1099 u32 attn_bits = 0, attn_acks = 0;
1100 u16 asserted_bits, deasserted_bits;
1104 /* Read current attention bits/acks - safeguard against attentions
1105 * by guaranteeing work on a synchronized timeframe
1108 index = p_sb_attn->sb_index;
1109 /* finish reading index before the loop condition */
1111 attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
1112 attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
1113 } while (index != p_sb_attn->sb_index);
1114 p_sb_attn->sb_index = index;
1116 /* Attention / Deassertion are meaningful (and in correct state)
1117 * only when they differ and are consistent with the known state - deassertion
1118 * when previous attention & current ack, and assertion when current
1119 * attention with no previous attention
1121 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1122 ~p_sb_attn_sw->known_attn;
1123 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1124 p_sb_attn_sw->known_attn;
1126 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
1128 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1129 index, attn_bits, attn_acks, asserted_bits,
1130 deasserted_bits, p_sb_attn_sw->known_attn);
1131 } else if (asserted_bits == 0x100) {
1132 DP_INFO(p_hwfn, "MFW indication via attention\n");
1134 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1135 "MFW indication [deassertion]\n");
1138 if (asserted_bits) {
1139 rc = qed_int_assertion(p_hwfn, asserted_bits);
1144 if (deasserted_bits)
1145 rc = qed_int_deassertion(p_hwfn, deasserted_bits);
1150 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
1151 void __iomem *igu_addr, u32 ack_cons)
1153 struct igu_prod_cons_update igu_ack = { 0 };
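/* Compose a single producer/consumer update command: the attention ack
 * consumer goes into the SB index field, the update flag is set, interrupt
 * enablement is left unchanged (IGU_INT_NOP) and the attention segment is
 * selected.
 */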
1155 igu_ack.sb_id_and_flags =
1156 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1157 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1158 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1159 (IGU_SEG_ACCESS_ATTN <<
1160 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1162 DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
1164 /* Both segments (interrupts & acks) are written to same place address;
1165 * Need to guarantee all commands will be received (in-order) by HW.
1170 void qed_int_sp_dpc(unsigned long hwfn_cookie)
1172 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
1173 struct qed_pi_info *pi_info = NULL;
1174 struct qed_sb_attn_info *sb_attn;
1175 struct qed_sb_info *sb_info;
1179 if (!p_hwfn->p_sp_sb) {
1180 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
1184 sb_info = &p_hwfn->p_sp_sb->sb_info;
1185 arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1187 DP_ERR(p_hwfn->cdev,
1188 "Status block is NULL - cannot ack interrupts\n");
1192 if (!p_hwfn->p_sb_attn) {
1193 DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
1196 sb_attn = p_hwfn->p_sb_attn;
1198 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1199 p_hwfn, p_hwfn->my_id);
1201 /* Disable ack for def status block. Required both for MSI-X and
1202 * INTa in non-mask mode; in INTa it does no harm.
1204 qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1206 /* Gather Interrupts/Attentions information */
1207 if (!sb_info->sb_virt) {
1208 DP_ERR(p_hwfn->cdev,
1209 "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1211 u32 tmp_index = sb_info->sb_ack;
1213 rc = qed_sb_update_sb_idx(sb_info);
1214 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
1215 "Interrupt indices: 0x%08x --> 0x%08x\n",
1216 tmp_index, sb_info->sb_ack);
1219 if (!sb_attn || !sb_attn->sb_attn) {
1220 DP_ERR(p_hwfn->cdev,
1221 "Attentions Status block is NULL - cannot check for new attentions!\n");
1223 u16 tmp_index = sb_attn->index;
1225 rc |= qed_attn_update_idx(p_hwfn, sb_attn);
1226 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
1227 "Attention indices: 0x%08x --> 0x%08x\n",
1228 tmp_index, sb_attn->index);
1231 /* Check if we expect interrupts at this time. If not, just ack them */
1232 if (!(rc & QED_SB_EVENT_MASK)) {
1233 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1237 /* Check the validity of the DPC ptt. If not ack interrupts and fail */
1238 if (!p_hwfn->p_dpc_ptt) {
1239 DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
1240 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1244 if (rc & QED_SB_ATT_IDX)
1245 qed_int_attentions(p_hwfn);
1247 if (rc & QED_SB_IDX) {
1250 /* Invoke the completion callback of each registered protocol index */
1251 for (pi = 0; pi < arr_size; pi++) {
1252 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1253 if (pi_info->comp_cb)
1254 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1258 if (sb_attn && (rc & QED_SB_ATT_IDX))
1259 /* This should be done before the interrupts are enabled,
1260 * since otherwise a new attention will be generated.
1262 qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1264 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1267 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
1269 struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1275 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1276 SB_ATTN_ALIGNED_SIZE(p_hwfn),
1277 p_sb->sb_attn, p_sb->sb_phys);
1279 p_hwfn->p_sb_attn = NULL;
1282 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
1283 struct qed_ptt *p_ptt)
1285 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1287 memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1290 sb_info->known_attn = 0;
1292 /* Configure Attention Status Block in IGU */
1293 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1294 lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
1295 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1296 upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
1299 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
1300 struct qed_ptt *p_ptt,
1301 void *sb_virt_addr, dma_addr_t sb_phy_addr)
1303 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1306 sb_info->sb_attn = sb_virt_addr;
1307 sb_info->sb_phys = sb_phy_addr;
1309 /* Set the pointer to the AEU descriptors */
1310 sb_info->p_aeu_desc = aeu_descs;
1312 /* Calculate Parity Masks */
1313 memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1314 for (i = 0; i < NUM_ATTN_REGS; i++) {
1315 /* j is array index, k is bit index */
1316 for (j = 0, k = 0; k < 32; j++) {
1317 struct aeu_invert_reg_bit *p_aeu;
1319 p_aeu = &aeu_descs[i].bits[j];
1320 if (qed_int_is_parity_flag(p_hwfn, p_aeu))
1321 sb_info->parity_mask[i] |= 1 << k;
1323 k += ATTENTION_LENGTH(p_aeu->flags);
1325 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1326 "Attn Mask [Reg %d]: 0x%08x\n",
1327 i, sb_info->parity_mask[i]);
1330 /* Set the address of cleanup for the mcp attention */
1331 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1332 MISC_REG_AEU_GENERAL_ATTN_0;
1334 qed_int_sb_attn_setup(p_hwfn, p_ptt);
1337 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
1338 struct qed_ptt *p_ptt)
1340 struct qed_dev *cdev = p_hwfn->cdev;
1341 struct qed_sb_attn_info *p_sb;
1342 dma_addr_t p_phys = 0;
1346 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
1351 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1352 SB_ATTN_ALIGNED_SIZE(p_hwfn),
1353 &p_phys, GFP_KERNEL);
1360 /* Attention setup */
1361 p_hwfn->p_sb_attn = p_sb;
1362 qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1367 /* coalescing timeout = timeset << (timer_res + 1) */
1368 #define QED_CAU_DEF_RX_USECS 24
1369 #define QED_CAU_DEF_TX_USECS 48
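/* Illustrative arithmetic for the relation above (timer_res = 1 is only an
 * assumption for the example): a 24 usec request is stored as
 * timeset = 24 >> 1 = 12, and the HW then applies
 * timeset << (timer_res + 1) = 12 << 2 = 48 units of the CAU base timer.
 */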
1371 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
1372 struct cau_sb_entry *p_sb_entry,
1373 u8 pf_id, u16 vf_number, u8 vf_valid)
1375 struct qed_dev *cdev = p_hwfn->cdev;
1379 memset(p_sb_entry, 0, sizeof(*p_sb_entry));
1381 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1382 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1383 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1384 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1385 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1387 cau_state = CAU_HC_DISABLE_STATE;
1389 if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
1390 cau_state = CAU_HC_ENABLE_STATE;
1391 if (!cdev->rx_coalesce_usecs)
1392 cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
1393 if (!cdev->tx_coalesce_usecs)
1394 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
1397 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
1398 if (cdev->rx_coalesce_usecs <= 0x7F)
1400 else if (cdev->rx_coalesce_usecs <= 0xFF)
1404 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1406 if (cdev->tx_coalesce_usecs <= 0x7F)
1408 else if (cdev->tx_coalesce_usecs <= 0xFF)
1412 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1414 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1415 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1418 static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
1419 struct qed_ptt *p_ptt,
1422 enum qed_coalescing_fsm coalescing_fsm,
1425 struct cau_pi_entry pi_entry;
1426 u32 sb_offset, pi_offset;
1428 if (IS_VF(p_hwfn->cdev))
1431 sb_offset = igu_sb_id * PIS_PER_SB_E4;
1432 memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
1434 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1435 if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
1436 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1438 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1440 pi_offset = sb_offset + pi_index;
1441 if (p_hwfn->hw_init_done) {
1442 qed_wr(p_hwfn, p_ptt,
1443 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1444 *((u32 *)&(pi_entry)));
1446 STORE_RT_REG(p_hwfn,
1447 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1448 *((u32 *)&(pi_entry)));
1452 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
1453 struct qed_ptt *p_ptt,
1455 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
1457 struct cau_sb_entry sb_entry;
1459 qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1460 vf_number, vf_valid);
1462 if (p_hwfn->hw_init_done) {
1463 /* Wide-bus, initialize via DMAE */
1464 u64 phys_addr = (u64)sb_phys;
1466 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
1467 CAU_REG_SB_ADDR_MEMORY +
1468 igu_sb_id * sizeof(u64), 2, 0);
1469 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
1470 CAU_REG_SB_VAR_MEMORY +
1471 igu_sb_id * sizeof(u64), 2, 0);
1473 /* Initialize Status Block Address */
1474 STORE_RT_REG_AGG(p_hwfn,
1475 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
1479 STORE_RT_REG_AGG(p_hwfn,
1480 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
1485 /* Configure pi coalescing if set */
1486 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
1487 u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1488 u8 timeset, timer_res;
1491 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
1492 if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
1494 else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
1498 timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
1499 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1500 QED_COAL_RX_STATE_MACHINE, timeset);
1502 if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
1504 else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
1508 timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
1509 for (i = 0; i < num_tc; i++) {
1510 qed_int_cau_conf_pi(p_hwfn, p_ptt,
1511 igu_sb_id, TX_PI(i),
1512 QED_COAL_TX_STATE_MACHINE,
1518 void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
1519 struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
1521 /* zero status block and ack counter */
1522 sb_info->sb_ack = 0;
1523 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1525 if (IS_PF(p_hwfn->cdev))
1526 qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1527 sb_info->igu_sb_id, 0, 0);
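/* Find the first IGU CAM entry that is both valid and free and whose PF/VF
 * ownership matches the caller's b_is_pf request.
 */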
1530 struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
1532 struct qed_igu_block *p_block;
1535 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
1537 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1539 if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1540 !(p_block->status & QED_IGU_STATUS_FREE))
1543 if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
1550 static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
1552 struct qed_igu_block *p_block;
1555 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
1557 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1559 if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1561 p_block->vector_number != vector_id)
1567 return QED_SB_INVALID_IDX;
1570 u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
1574 /* Assuming a continuous set of IGU SBs dedicated to the given PF */
1575 if (sb_id == QED_SP_SB_ID)
1576 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1577 else if (IS_PF(p_hwfn->cdev))
1578 igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1580 igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
1582 if (sb_id == QED_SP_SB_ID)
1583 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1584 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1586 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1587 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1592 int qed_int_sb_init(struct qed_hwfn *p_hwfn,
1593 struct qed_ptt *p_ptt,
1594 struct qed_sb_info *sb_info,
1595 void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
1597 sb_info->sb_virt = sb_virt_addr;
1598 sb_info->sb_phys = sb_phy_addr;
1600 sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
1602 if (sb_id != QED_SP_SB_ID) {
1603 if (IS_PF(p_hwfn->cdev)) {
1604 struct qed_igu_info *p_info;
1605 struct qed_igu_block *p_block;
1607 p_info = p_hwfn->hw_info.p_igu_info;
1608 p_block = &p_info->entry[sb_info->igu_sb_id];
1610 p_block->sb_info = sb_info;
1611 p_block->status &= ~QED_IGU_STATUS_FREE;
1612 p_info->usage.free_cnt--;
1614 qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1618 sb_info->cdev = p_hwfn->cdev;
1620 /* The igu address will hold the absolute address that needs to be
1621 * written to for a specific status block
1623 if (IS_PF(p_hwfn->cdev)) {
1624 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
1625 GTT_BAR0_MAP_REG_IGU_CMD +
1626 (sb_info->igu_sb_id << 3);
1628 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
1629 PXP_VF_BAR0_START_IGU +
1630 ((IGU_CMD_INT_ACK_BASE +
1631 sb_info->igu_sb_id) << 3);
1634 sb_info->flags |= QED_SB_INFO_INIT;
1636 qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
1641 int qed_int_sb_release(struct qed_hwfn *p_hwfn,
1642 struct qed_sb_info *sb_info, u16 sb_id)
1644 struct qed_igu_block *p_block;
1645 struct qed_igu_info *p_info;
1650 /* zero status block and ack counter */
1651 sb_info->sb_ack = 0;
1652 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1654 if (IS_VF(p_hwfn->cdev)) {
1655 qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
1659 p_info = p_hwfn->hw_info.p_igu_info;
1660 p_block = &p_info->entry[sb_info->igu_sb_id];
1662 /* Vector 0 is reserved for the Default SB */
1663 if (!p_block->vector_number) {
1664 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
1668 /* Lose reference to client's SB info, and fix counters */
1669 p_block->sb_info = NULL;
1670 p_block->status |= QED_IGU_STATUS_FREE;
1671 p_info->usage.free_cnt++;
1676 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
1678 struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1683 if (p_sb->sb_info.sb_virt)
1684 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1685 SB_ALIGNED_SIZE(p_hwfn),
1686 p_sb->sb_info.sb_virt,
1687 p_sb->sb_info.sb_phys);
1689 p_hwfn->p_sp_sb = NULL;
1692 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1694 struct qed_sb_sp_info *p_sb;
1695 dma_addr_t p_phys = 0;
1699 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
1704 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1705 SB_ALIGNED_SIZE(p_hwfn),
1706 &p_phys, GFP_KERNEL);
1712 /* Status Block setup */
1713 p_hwfn->p_sp_sb = p_sb;
1714 qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
1715 p_phys, QED_SP_SB_ID);
1717 memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1722 int qed_int_register_cb(struct qed_hwfn *p_hwfn,
1723 qed_int_comp_cb_t comp_cb,
1724 void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
1726 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1730 /* Look for a free index */
1731 for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1732 if (p_sp_sb->pi_info_arr[pi].comp_cb)
1735 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1736 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1738 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
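/* Illustrative usage of qed_int_register_cb() above (my_comp_cb/my_cookie
 * are hypothetical client-side names, not taken from this file): a slowpath
 * client registers a handler and receives both the chosen PI index and a
 * pointer to the firmware consumer it should track:
 *
 *	u8 pi_idx;
 *	__le16 *p_fw_cons;
 *
 *	rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				 &pi_idx, &p_fw_cons);
 *
 * The handler is later invoked from qed_int_sp_dpc() whenever the slowpath
 * status block index advances.
 */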
1746 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
1748 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1750 if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
1753 p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
1754 p_sp_sb->pi_info_arr[pi].cookie = NULL;
1759 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
1761 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1764 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
1765 struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
1767 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1769 p_hwfn->cdev->int_mode = int_mode;
1770 switch (p_hwfn->cdev->int_mode) {
1771 case QED_INT_MODE_INTA:
1772 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1773 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1776 case QED_INT_MODE_MSI:
1777 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1778 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1781 case QED_INT_MODE_MSIX:
1782 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1784 case QED_INT_MODE_POLL:
1788 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1791 static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
1792 struct qed_ptt *p_ptt)
1795 /* Configure AEU signal change to produce attentions */
1796 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1797 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1798 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1799 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1801 /* Unmask AEU signals toward IGU */
1802 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1806 qed_int_igu_enable(struct qed_hwfn *p_hwfn,
1807 struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
1811 qed_int_igu_enable_attn(p_hwfn, p_ptt);
1813 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1814 rc = qed_slowpath_irq_req(p_hwfn);
1816 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
1819 p_hwfn->b_int_requested = true;
1821 /* Enable interrupt Generation */
1822 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1823 p_hwfn->b_int_enabled = 1;
1828 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1830 p_hwfn->b_int_enabled = 0;
1832 if (IS_VF(p_hwfn->cdev))
1835 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1838 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
1839 static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
1840 struct qed_ptt *p_ptt,
1842 bool cleanup_set, u16 opaque_fid)
1844 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1845 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1846 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1848 /* Set the data field */
1849 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1850 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
1851 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1853 /* Set the control register */
1854 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1855 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1856 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1858 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1862 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1864 /* calculate where to read the status bit from */
1865 sb_bit = 1 << (igu_sb_id % 32);
1866 sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
1868 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
1870 /* Now wait for the command to complete */
1872 val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
1874 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1877 usleep_range(5000, 10000);
1878 } while (--sleep_cnt);
1882 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1886 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
1887 struct qed_ptt *p_ptt,
1888 u16 igu_sb_id, u16 opaque, bool b_set)
1890 struct qed_igu_block *p_block;
1893 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
1894 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1895 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
1897 p_block->function_id,
1898 p_block->is_pf, p_block->vector_number);
1902 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
1905 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
1907 /* Wait for the IGU SB to clean up */
1908 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
1911 val = qed_rd(p_hwfn, p_ptt,
1912 IGU_REG_WRITE_DONE_PENDING +
1913 ((igu_sb_id / 32) * 4));
1914 if (val & BIT((igu_sb_id % 32)))
1915 usleep_range(10, 20);
1919 if (i == IGU_CLEANUP_SLEEP_LENGTH)
1921 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
1924 /* Clear the CAU for the SB */
1925 for (pi = 0; pi < 12; pi++)
1926 qed_wr(p_hwfn, p_ptt,
1927 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
1930 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
1931 struct qed_ptt *p_ptt,
1932 bool b_set, bool b_slowpath)
1934 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1935 struct qed_igu_block *p_block;
1939 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
1940 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
1941 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
1942 qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
1945 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
1946 p_block = &p_info->entry[igu_sb_id];
1948 if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1950 (p_block->status & QED_IGU_STATUS_DSB))
1953 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
1954 p_hwfn->hw_info.opaque_fid,
1959 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
1961 p_hwfn->hw_info.opaque_fid,
1965 int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1967 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1968 struct qed_igu_block *p_block;
1973 if (!RESC_NUM(p_hwfn, QED_SB)) {
1974 p_info->b_allow_pf_vf_change = false;
1976 /* Use the numbers the MFW has provided -
1977 * don't forget the MFW accounts for the default SB as well.
1979 p_info->b_allow_pf_vf_change = true;
1981 if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
1983 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
1984 RESC_NUM(p_hwfn, QED_SB) - 1,
1986 p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
1989 if (IS_PF_SRIOV(p_hwfn)) {
1990 u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
1992 if (vfs != p_info->usage.iov_cnt)
1995 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
1996 p_info->usage.iov_cnt, vfs);
1998 /* At this point we know the total number of SBs in the
1999 * IGU and the number of PF SBs, so we can validate that
2000 * enough remain for the VFs.
2002 if (vfs > p_info->usage.free_cnt +
2003 p_info->usage.free_cnt_iov - p_info->usage.cnt) {
2005 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
2006 p_info->usage.free_cnt +
2007 p_info->usage.free_cnt_iov,
2008 p_info->usage.cnt, vfs);
2012 /* Currently cap the number of VFs SBs by the
2015 p_info->usage.iov_cnt = vfs;
2019 /* Mark all SBs as free, now in the right PF/VFs division */
2020 p_info->usage.free_cnt = p_info->usage.cnt;
2021 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
2022 p_info->usage.orig = p_info->usage.cnt;
2023 p_info->usage.iov_orig = p_info->usage.iov_cnt;
2025 /* We now proceed to re-configure the IGU cam to reflect the initial
2026 * configuration. We can start with the Default SB.
2028 pf_sbs = p_info->usage.cnt;
2029 vf_sbs = p_info->usage.iov_cnt;
2031 for (igu_sb_id = p_info->igu_dsb_id;
2032 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
2033 p_block = &p_info->entry[igu_sb_id];
2036 if (!(p_block->status & QED_IGU_STATUS_VALID))
2039 if (p_block->status & QED_IGU_STATUS_DSB) {
2040 p_block->function_id = p_hwfn->rel_pf_id;
2042 p_block->vector_number = 0;
2043 p_block->status = QED_IGU_STATUS_VALID |
2046 } else if (pf_sbs) {
2048 p_block->function_id = p_hwfn->rel_pf_id;
2050 p_block->vector_number = p_info->usage.cnt - pf_sbs;
2051 p_block->status = QED_IGU_STATUS_VALID |
2053 QED_IGU_STATUS_FREE;
2054 } else if (vf_sbs) {
2055 p_block->function_id =
2056 p_hwfn->cdev->p_iov_info->first_vf_in_pf +
2057 p_info->usage.iov_cnt - vf_sbs;
2059 p_block->vector_number = 0;
2060 p_block->status = QED_IGU_STATUS_VALID |
2061 QED_IGU_STATUS_FREE;
2064 p_block->function_id = 0;
2066 p_block->vector_number = 0;
2069 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2070 p_block->function_id);
2071 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2072 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2073 p_block->vector_number);
2075 /* VF entries would be enabled when the VF is initialized */
2076 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2078 rval = qed_rd(p_hwfn, p_ptt,
2079 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2082 qed_wr(p_hwfn, p_ptt,
2083 IGU_REG_MAPPING_MEMORY +
2084 sizeof(u32) * igu_sb_id, val);
2088 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
2090 p_block->function_id,
2092 p_block->vector_number, rval, val);
2099 static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
2100 struct qed_ptt *p_ptt, u16 igu_sb_id)
2102 u32 val = qed_rd(p_hwfn, p_ptt,
2103 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2104 struct qed_igu_block *p_block;
2106 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2108 /* Fill the block information */
2109 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
2110 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2111 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
2112 p_block->igu_sb_id = igu_sb_id;
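/* Walk the entire IGU mapping memory, record each entry in the per-hwfn
 * qed_igu_info, classify it as owned by this PF, by one of its VFs or by a
 * foreign function, and elect the first usable entry as the default SB.
 */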
2115 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2117 struct qed_igu_info *p_igu_info;
2118 struct qed_igu_block *p_block;
2119 u32 min_vf = 0, max_vf = 0;
2122 p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
2123 if (!p_hwfn->hw_info.p_igu_info)
2126 p_igu_info = p_hwfn->hw_info.p_igu_info;
2128 /* Distinguish between existent and non-existent default SB */
2129 p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;
2131 /* Find the range of VF ids whose SB belong to this PF */
2132 if (p_hwfn->cdev->p_iov_info) {
2133 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
2135 min_vf = p_iov->first_vf_in_pf;
2136 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
2140 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
2141 /* Read current entry; Notice it might not belong to this PF */
2142 qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
2143 p_block = &p_igu_info->entry[igu_sb_id];
2145 if ((p_block->is_pf) &&
2146 (p_block->function_id == p_hwfn->rel_pf_id)) {
2147 p_block->status = QED_IGU_STATUS_PF |
2148 QED_IGU_STATUS_VALID |
2149 QED_IGU_STATUS_FREE;
2151 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
2152 p_igu_info->usage.cnt++;
2153 } else if (!(p_block->is_pf) &&
2154 (p_block->function_id >= min_vf) &&
2155 (p_block->function_id < max_vf)) {
2156 /* Available for VFs of this PF */
2157 p_block->status = QED_IGU_STATUS_VALID |
2158 QED_IGU_STATUS_FREE;
2160 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
2161 p_igu_info->usage.iov_cnt++;
2164 /* Mark the First entry belonging to the PF or its VFs
2165 * as the default SB [we'll reset IGU prior to first usage].
2167 if ((p_block->status & QED_IGU_STATUS_VALID) &&
2168 (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
2169 p_igu_info->igu_dsb_id = igu_sb_id;
2170 p_block->status |= QED_IGU_STATUS_DSB;
2173 /* limit number of prints by having each PF print only its
2174 * entries with the exception of PF0 which would print
2177 if ((p_block->status & QED_IGU_STATUS_VALID) ||
2178 (p_hwfn->abs_pf_id == 0)) {
2179 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2180 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2181 igu_sb_id, p_block->function_id,
2182 p_block->is_pf, p_block->vector_number);
2186 if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
2188 "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
2189 p_igu_info->igu_dsb_id);
2193 /* All non-default SBs are considered free at this point */
2194 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
2195 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
2197 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2198 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
2199 p_igu_info->igu_dsb_id,
2200 p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
2206 * @brief Initialize igu runtime registers
2210 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
2212 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2214 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2217 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
2219 u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
2220 IGU_CMD_INT_ACK_BASE;
2221 u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
2222 IGU_CMD_INT_ACK_BASE;
2223 u32 intr_status_hi = 0, intr_status_lo = 0;
2224 u64 intr_status = 0;
2226 intr_status_lo = REG_RD(p_hwfn,
2227 GTT_BAR0_MAP_REG_IGU_CMD +
2228 lsb_igu_cmd_addr * 8);
2229 intr_status_hi = REG_RD(p_hwfn,
2230 GTT_BAR0_MAP_REG_IGU_CMD +
2231 msb_igu_cmd_addr * 8);
2232 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2237 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
2239 tasklet_init(p_hwfn->sp_dpc,
2240 qed_int_sp_dpc, (unsigned long)p_hwfn);
2241 p_hwfn->b_sp_dpc_enabled = true;
2244 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
2246 p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
2247 if (!p_hwfn->sp_dpc)
2253 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
2255 kfree(p_hwfn->sp_dpc);
2256 p_hwfn->sp_dpc = NULL;
2259 int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2263 rc = qed_int_sp_dpc_alloc(p_hwfn);
2267 rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
2271 rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
2276 void qed_int_free(struct qed_hwfn *p_hwfn)
2278 qed_int_sp_sb_free(p_hwfn);
2279 qed_int_sb_attn_free(p_hwfn);
2280 qed_int_sp_dpc_free(p_hwfn);
2283 void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2285 qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2286 qed_int_sb_attn_setup(p_hwfn, p_ptt);
2287 qed_int_sp_dpc_setup(p_hwfn);
2290 void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
2291 struct qed_sb_cnt_info *p_sb_cnt_info)
2293 struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
2295 if (!info || !p_sb_cnt_info)
2298 memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
2301 void qed_int_disable_post_isr_release(struct qed_dev *cdev)
2305 for_each_hwfn(cdev, i)
2306 cdev->hwfns[i].b_int_requested = false;
2309 int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2310 u8 timer_res, u16 sb_id, bool tx)
2312 struct cau_sb_entry sb_entry;
2315 if (!p_hwfn->hw_init_done) {
2316 DP_ERR(p_hwfn, "hardware not initialized yet\n");
2320 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2321 sb_id * sizeof(u64),
2322 (u64)(uintptr_t)&sb_entry, 2, 0);
2324 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2329 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2331 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2333 rc = qed_dmae_host2grc(p_hwfn, p_ptt,
2334 (u64)(uintptr_t)&sb_entry,
2335 CAU_REG_SB_VAR_MEMORY +
2336 sb_id * sizeof(u64), 2, 0);
2338 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);