[linux.git] drivers/infiniband/hw/hfi1/chip.c
1 /*
2  * Copyright(c) 2015 - 2018 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 #include "affinity.h"
67 #include "debugfs.h"
68 #include "fault.h"
69
70 uint kdeth_qp;
71 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75 module_param(num_vls, uint, S_IRUGO);
76 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78 /*
79  * Default time to aggregate two 10K packets from the idle state
80  * (timer not running). The timer starts at the end of the first packet,
81  * so only the time for one 10K packet and header plus a bit extra is needed.
82  * 10 * 1024 + 64 header bytes = 10304 bytes
83  * 10304 bytes / 12.5 GB/s = 824.32 ns
84  */
85 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86 module_param(rcv_intr_timeout, uint, S_IRUGO);
87 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89 uint rcv_intr_count = 16; /* same as qib */
90 module_param(rcv_intr_count, uint, S_IRUGO);
91 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93 ushort link_crc_mask = SUPPORTED_CRCS;
94 module_param(link_crc_mask, ushort, S_IRUGO);
95 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97 uint loopback;
98 module_param_named(loopback, loopback, uint, S_IRUGO);
99 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101 /* Other driver tunables */
102 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103 static ushort crc_14b_sideband = 1;
104 static uint use_flr = 1;
105 uint quick_linkup; /* skip LNI */
106
107 struct flag_table {
108         u64 flag;       /* the flag */
109         char *str;      /* description string */
110         u16 extra;      /* extra information */
111         u16 unused0;
112         u32 unused1;
113 };
114
115 /* str must be a string constant */
116 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
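/*
 * Note (illustrative, not part of the driver logic): tables built from
 * these entries are scanned when an error status CSR is decoded - each
 * set bit is matched against entry->flag and the matching description
 * strings are reported.  A minimal sketch of that pattern, assuming a
 * table "tbl" with "len" entries and an error register value "reg":
 *
 *      for (i = 0; i < len; i++)
 *              if (reg & tbl[i].flag)
 *                      pr_info("error: %s\n", tbl[i].str);
 */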
118
119 /* Send Error Consequences */
120 #define SEC_WRITE_DROPPED       0x1
121 #define SEC_PACKET_DROPPED      0x2
122 #define SEC_SC_HALTED           0x4     /* per-context only */
123 #define SEC_SPC_FREEZE          0x8     /* per-HFI only */
124
125 #define DEFAULT_KRCVQS            2
126 #define MIN_KERNEL_KCTXTS         2
127 #define FIRST_KERNEL_KCTXT        1
128
129 /*
130  * RSM instance allocation
131  *   0 - Verbs
132  *   1 - User Fecn Handling
133  *   2 - Vnic
134  */
135 #define RSM_INS_VERBS             0
136 #define RSM_INS_FECN              1
137 #define RSM_INS_VNIC              2
138
139 /* Bit offset into the GUID which carries HFI id information */
140 #define GUID_HFI_INDEX_SHIFT     39
141
142 /* extract the emulation revision */
143 #define emulator_rev(dd) ((dd)->irev >> 8)
144 /* parallel and serial emulation versions are 3 and 4 respectively */
145 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
146 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
147
148 /* RSM fields for Verbs */
149 /* packet type */
150 #define IB_PACKET_TYPE         2ull
151 #define QW_SHIFT               6ull
152 /* QPN[7..1] */
153 #define QPN_WIDTH              7ull
154
155 /* LRH.BTH: QW 0, OFFSET 48 - for match */
156 #define LRH_BTH_QW             0ull
157 #define LRH_BTH_BIT_OFFSET     48ull
158 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
159 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
160 #define LRH_BTH_SELECT
161 #define LRH_BTH_MASK           3ull
162 #define LRH_BTH_VALUE          2ull
163
164 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
165 #define LRH_SC_QW              0ull
166 #define LRH_SC_BIT_OFFSET      56ull
167 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
168 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
169 #define LRH_SC_MASK            128ull
170 #define LRH_SC_VALUE           0ull
171
172 /* SC[n..0] QW 0, OFFSET 60 - for select */
173 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
174
175 /* QPN[m+n:1] QW 1, OFFSET 1 */
176 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
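/*
 * Each RSM match/select offset above packs a quad-word index in the bits
 * above QW_SHIFT and a bit offset within that quad word in the low six
 * bits.  For example:
 *      LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48   (QW 0, bit 48)
 *      QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65   (QW 1, bit 1)
 */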
177
178 /* RSM fields for Vnic */
179 /* L2_TYPE: QW 0, OFFSET 61 - for match */
180 #define L2_TYPE_QW             0ull
181 #define L2_TYPE_BIT_OFFSET     61ull
182 #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
183 #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
184 #define L2_TYPE_MASK           3ull
185 #define L2_16B_VALUE           2ull
186
187 /* L4_TYPE QW 1, OFFSET 0 - for match */
188 #define L4_TYPE_QW              1ull
189 #define L4_TYPE_BIT_OFFSET      0ull
190 #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
191 #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
192 #define L4_16B_TYPE_MASK        0xFFull
193 #define L4_16B_ETH_VALUE        0x78ull
194
195 /* 16B VESWID - for select */
196 #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
197 /* 16B ENTROPY - for select */
198 #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
199
200 /* defines to build power on SC2VL table */
201 #define SC2VL_VAL( \
202         num, \
203         sc0, sc0val, \
204         sc1, sc1val, \
205         sc2, sc2val, \
206         sc3, sc3val, \
207         sc4, sc4val, \
208         sc5, sc5val, \
209         sc6, sc6val, \
210         sc7, sc7val) \
211 ( \
212         ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
213         ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
214         ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
215         ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
216         ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
217         ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
218         ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
219         ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
220 )
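/*
 * Illustrative use of SC2VL_VAL (hypothetical values, shown only to make
 * the pasting of each SC number into its SEND_SC2VLT<num>_SC<sc>_SHIFT
 * token visible).  Mapping SC0..SC7 all to VL0 in table 0 would look
 * roughly like:
 *
 *      write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(0,
 *                                            0, 0, 1, 0, 2, 0, 3, 0,
 *                                            4, 0, 5, 0, 6, 0, 7, 0));
 */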
221
222 #define DC_SC_VL_VAL( \
223         range, \
224         e0, e0val, \
225         e1, e1val, \
226         e2, e2val, \
227         e3, e3val, \
228         e4, e4val, \
229         e5, e5val, \
230         e6, e6val, \
231         e7, e7val, \
232         e8, e8val, \
233         e9, e9val, \
234         e10, e10val, \
235         e11, e11val, \
236         e12, e12val, \
237         e13, e13val, \
238         e14, e14val, \
239         e15, e15val) \
240 ( \
241         ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
242         ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
243         ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
244         ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
245         ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
246         ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
247         ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
248         ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
249         ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
250         ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
251         ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
252         ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
253         ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
254         ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
255         ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
256         ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
257 )
258
259 /* all CceStatus sub-block freeze bits */
260 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
261                         | CCE_STATUS_RXE_FROZE_SMASK \
262                         | CCE_STATUS_TXE_FROZE_SMASK \
263                         | CCE_STATUS_TXE_PIO_FROZE_SMASK)
264 /* all CceStatus sub-block TXE pause bits */
265 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
266                         | CCE_STATUS_TXE_PAUSED_SMASK \
267                         | CCE_STATUS_SDMA_PAUSED_SMASK)
268 /* all CceStatus sub-block RXE pause bits */
269 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
270
271 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
272 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
273
274 /*
275  * CCE Error flags.
276  */
277 static struct flag_table cce_err_status_flags[] = {
278 /* 0*/  FLAG_ENTRY0("CceCsrParityErr",
279                 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
280 /* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
281                 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
282 /* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
283                 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
284 /* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
285                 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
286 /* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
287                 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
288 /* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
289                 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
290 /* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
291                 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
292 /* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
293                 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
294 /* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
295                 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
296 /* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
297             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
298 /*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
299             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
300 /*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
301             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
302 /*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
303                 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
304 /*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
305                 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
306 /*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
307                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
308 /*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
309                 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
310 /*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
311                 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
312 /*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
313                 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
314 /*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
315                 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
316 /*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
317                 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
318 /*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
319                 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
320 /*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
321                 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
322 /*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
323                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
324 /*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
325                 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
326 /*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
327                 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
328 /*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
329                 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
330 /*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
331                 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
332 /*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
333                 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
334 /*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
335                 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
336 /*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
337                 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
338 /*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
339                 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
340 /*31*/  FLAG_ENTRY0("LATriggered",
341                 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
342 /*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
343                 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
344 /*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
345                 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
346 /*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
347                 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
348 /*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
349                 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
350 /*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
351                 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
352 /*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
353                 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
354 /*38*/  FLAG_ENTRY0("CceIntMapCorErr",
355                 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
356 /*39*/  FLAG_ENTRY0("CceIntMapUncErr",
357                 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
358 /*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
359                 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
360 /*41-63 reserved*/
361 };
362
363 /*
364  * Misc Error flags
365  */
366 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
367 static struct flag_table misc_err_status_flags[] = {
368 /* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
369 /* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
370 /* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
371 /* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
372 /* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
373 /* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
374 /* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
375 /* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
376 /* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
377 /* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
378 /*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
379 /*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
380 /*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
381 };
382
383 /*
384  * TXE PIO Error flags and consequences
385  */
386 static struct flag_table pio_err_status_flags[] = {
387 /* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
388         SEC_WRITE_DROPPED,
389         SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
390 /* 1*/  FLAG_ENTRY("PioWriteAddrParity",
391         SEC_SPC_FREEZE,
392         SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
393 /* 2*/  FLAG_ENTRY("PioCsrParity",
394         SEC_SPC_FREEZE,
395         SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
396 /* 3*/  FLAG_ENTRY("PioSbMemFifo0",
397         SEC_SPC_FREEZE,
398         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
399 /* 4*/  FLAG_ENTRY("PioSbMemFifo1",
400         SEC_SPC_FREEZE,
401         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
402 /* 5*/  FLAG_ENTRY("PioPccFifoParity",
403         SEC_SPC_FREEZE,
404         SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
405 /* 6*/  FLAG_ENTRY("PioPecFifoParity",
406         SEC_SPC_FREEZE,
407         SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
408 /* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
409         SEC_SPC_FREEZE,
410         SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
411 /* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
412         SEC_SPC_FREEZE,
413         SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
414 /* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
415         SEC_SPC_FREEZE,
416         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
417 /*10*/  FLAG_ENTRY("PioSmPktResetParity",
418         SEC_SPC_FREEZE,
419         SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
420 /*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
421         SEC_SPC_FREEZE,
422         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
423 /*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
424         SEC_SPC_FREEZE,
425         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
426 /*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
427         0,
428         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
429 /*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
430         0,
431         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
432 /*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
433         SEC_SPC_FREEZE,
434         SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
435 /*16*/  FLAG_ENTRY("PioPpmcPblFifo",
436         SEC_SPC_FREEZE,
437         SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
438 /*17*/  FLAG_ENTRY("PioInitSmIn",
439         0,
440         SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
441 /*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
442         SEC_SPC_FREEZE,
443         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
444 /*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
445         SEC_SPC_FREEZE,
446         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
447 /*20*/  FLAG_ENTRY("PioHostAddrMemCor",
448         0,
449         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
450 /*21*/  FLAG_ENTRY("PioWriteDataParity",
451         SEC_SPC_FREEZE,
452         SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
453 /*22*/  FLAG_ENTRY("PioStateMachine",
454         SEC_SPC_FREEZE,
455         SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
456 /*23*/  FLAG_ENTRY("PioWriteQwValidParity",
457         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
458         SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
459 /*24*/  FLAG_ENTRY("PioBlockQwCountParity",
460         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
461         SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
462 /*25*/  FLAG_ENTRY("PioVlfVlLenParity",
463         SEC_SPC_FREEZE,
464         SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
465 /*26*/  FLAG_ENTRY("PioVlfSopParity",
466         SEC_SPC_FREEZE,
467         SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
468 /*27*/  FLAG_ENTRY("PioVlFifoParity",
469         SEC_SPC_FREEZE,
470         SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
471 /*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
472         SEC_SPC_FREEZE,
473         SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
474 /*29*/  FLAG_ENTRY("PioPpmcSopLen",
475         SEC_SPC_FREEZE,
476         SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
477 /*30-31 reserved*/
478 /*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
479         SEC_SPC_FREEZE,
480         SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
481 /*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
482         SEC_SPC_FREEZE,
483         SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
484 /*34*/  FLAG_ENTRY("PioPccSopHeadParity",
485         SEC_SPC_FREEZE,
486         SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
487 /*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
488         SEC_SPC_FREEZE,
489         SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
490 /*36-63 reserved*/
491 };
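/*
 * Note: the "extra" field in each entry above carries the send error
 * consequence (the SEC_* values defined near the top of this file); the
 * entries flagged with SEC_SPC_FREEZE are the ones collected into
 * ALL_PIO_FREEZE_ERR below.
 */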
492
493 /* TXE PIO errors that cause an SPC freeze */
494 #define ALL_PIO_FREEZE_ERR \
495         (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
496         | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
497         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
498         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
499         | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
500         | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
501         | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
502         | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
503         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
504         | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
505         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
506         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
507         | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
508         | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
509         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
510         | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
511         | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
512         | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
513         | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
514         | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
515         | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
516         | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
517         | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
518         | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
519         | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
520         | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
521         | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
522         | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
523         | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
524
525 /*
526  * TXE SDMA Error flags
527  */
528 static struct flag_table sdma_err_status_flags[] = {
529 /* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
530                 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
531 /* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
532                 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
533 /* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
534                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
535 /* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
536                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
537 /*04-63 reserved*/
538 };
539
540 /* TXE SDMA errors that cause an SPC freeze */
541 #define ALL_SDMA_FREEZE_ERR  \
542                 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
543                 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
544                 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
545
546 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
547 #define PORT_DISCARD_EGRESS_ERRS \
548         (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
549         | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
550         | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
551
552 /*
553  * TXE Egress Error flags
554  */
555 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
556 static struct flag_table egress_err_status_flags[] = {
557 /* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
558 /* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
559 /* 2 reserved */
560 /* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
561                 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
562 /* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
563 /* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
564 /* 6 reserved */
565 /* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
566                 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
567 /* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
568                 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
569 /* 9-10 reserved */
570 /*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
571                 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
572 /*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
573 /*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
574 /*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
575 /*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
576 /*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
577                 SEES(TX_SDMA0_DISALLOWED_PACKET)),
578 /*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
579                 SEES(TX_SDMA1_DISALLOWED_PACKET)),
580 /*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
581                 SEES(TX_SDMA2_DISALLOWED_PACKET)),
582 /*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
583                 SEES(TX_SDMA3_DISALLOWED_PACKET)),
584 /*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
585                 SEES(TX_SDMA4_DISALLOWED_PACKET)),
586 /*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
587                 SEES(TX_SDMA5_DISALLOWED_PACKET)),
588 /*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
589                 SEES(TX_SDMA6_DISALLOWED_PACKET)),
590 /*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
591                 SEES(TX_SDMA7_DISALLOWED_PACKET)),
592 /*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
593                 SEES(TX_SDMA8_DISALLOWED_PACKET)),
594 /*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
595                 SEES(TX_SDMA9_DISALLOWED_PACKET)),
596 /*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
597                 SEES(TX_SDMA10_DISALLOWED_PACKET)),
598 /*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
599                 SEES(TX_SDMA11_DISALLOWED_PACKET)),
600 /*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
601                 SEES(TX_SDMA12_DISALLOWED_PACKET)),
602 /*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
603                 SEES(TX_SDMA13_DISALLOWED_PACKET)),
604 /*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
605                 SEES(TX_SDMA14_DISALLOWED_PACKET)),
606 /*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
607                 SEES(TX_SDMA15_DISALLOWED_PACKET)),
608 /*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
609                 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
610 /*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
611                 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
612 /*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
613                 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
614 /*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
615                 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
616 /*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
617                 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
618 /*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
619                 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
620 /*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
621                 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
622 /*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
623                 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
624 /*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
625                 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
626 /*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
627 /*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
628 /*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
629 /*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
630 /*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
631 /*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
632 /*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
633 /*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
634 /*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
635 /*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
636 /*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
637 /*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
638 /*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
639 /*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
640 /*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
641 /*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
642 /*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
643 /*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
644 /*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
645 /*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
646 /*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
647 /*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
648                 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
649 /*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
650                 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
651 };
652
653 /*
654  * TXE Egress Error Info flags
655  */
656 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
657 static struct flag_table egress_err_info_flags[] = {
658 /* 0*/  FLAG_ENTRY0("Reserved", 0ull),
659 /* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
660 /* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
661 /* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
662 /* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
663 /* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
664 /* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
665 /* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
666 /* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
667 /* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
668 /*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
669 /*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
670 /*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
671 /*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
672 /*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
673 /*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
674 /*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
675 /*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
676 /*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
677 /*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
678 /*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
679 /*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
680 };
681
682 /* TXE Egress errors that cause an SPC freeze */
683 #define ALL_TXE_EGRESS_FREEZE_ERR \
684         (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
685         | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
686         | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
687         | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
688         | SEES(TX_LAUNCH_CSR_PARITY) \
689         | SEES(TX_SBRD_CTL_CSR_PARITY) \
690         | SEES(TX_CONFIG_PARITY) \
691         | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
692         | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
693         | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
694         | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
695         | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
696         | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
697         | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
698         | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
699         | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
700         | SEES(TX_CREDIT_RETURN_PARITY))
701
702 /*
703  * TXE Send error flags
704  */
705 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
706 static struct flag_table send_err_status_flags[] = {
707 /* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
708 /* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
709 /* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
710 };
711
712 /*
713  * TXE Send Context Error flags and consequences
714  */
715 static struct flag_table sc_err_status_flags[] = {
716 /* 0*/  FLAG_ENTRY("InconsistentSop",
717                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
718                 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
719 /* 1*/  FLAG_ENTRY("DisallowedPacket",
720                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
721                 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
722 /* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
723                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
724                 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
725 /* 3*/  FLAG_ENTRY("WriteOverflow",
726                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
727                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
728 /* 4*/  FLAG_ENTRY("WriteOutOfBounds",
729                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
730                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
731 /* 5-63 reserved*/
732 };
733
734 /*
735  * RXE Receive Error flags
736  */
737 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
738 static struct flag_table rxe_err_status_flags[] = {
739 /* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
740 /* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
741 /* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
742 /* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
743 /* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
744 /* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
745 /* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
746 /* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
747 /* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
748 /* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
749 /*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
750 /*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
751 /*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
752 /*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
753 /*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
754 /*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
755 /*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
756                 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
757 /*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
758 /*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
759 /*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
760                 RXES(RBUF_BLOCK_LIST_READ_UNC)),
761 /*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
762                 RXES(RBUF_BLOCK_LIST_READ_COR)),
763 /*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
764                 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
765 /*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
766                 RXES(RBUF_CSR_QENT_CNT_PARITY)),
767 /*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
768                 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
769 /*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
770                 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
771 /*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
772 /*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
773 /*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
774                 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
775 /*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
776 /*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
777 /*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
778 /*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
779 /*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
780 /*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
781 /*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
782 /*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
783                 RXES(RBUF_FL_INITDONE_PARITY)),
784 /*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
785                 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
786 /*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
787 /*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
788 /*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
789 /*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
790                 RXES(LOOKUP_DES_PART1_UNC_COR)),
791 /*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
792                 RXES(LOOKUP_DES_PART2_PARITY)),
793 /*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
794 /*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
795 /*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
796 /*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
797 /*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
798 /*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
799 /*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
800 /*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
801 /*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
802 /*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
803 /*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
804 /*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
805 /*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
806 /*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
807 /*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
808 /*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
809 /*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
810 /*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
811 /*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
812 /*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
813 /*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
814 /*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
815 };
816
817 /* RXE errors that will trigger an SPC freeze */
818 #define ALL_RXE_FREEZE_ERR  \
819         (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
820         | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
821         | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
822         | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
823         | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
824         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
825         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
826         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
827         | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
828         | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
829         | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
830         | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
831         | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
832         | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
833         | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
834         | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
835         | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
836         | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
837         | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
838         | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
839         | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
840         | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
841         | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
842         | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
843         | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
844         | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
845         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
846         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
847         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
848         | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
849         | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
850         | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
851         | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
852         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
853         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
854         | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
855         | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
856         | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
857         | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
858         | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
859         | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
860         | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
861         | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
862         | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
863
864 #define RXE_FREEZE_ABORT_MASK \
865         (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
866         RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
867         RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
868
869 /*
870  * DCC Error Flags
871  */
872 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
873 static struct flag_table dcc_err_flags[] = {
874         FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
875         FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
876         FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
877         FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
878         FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
879         FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
880         FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
881         FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
882         FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
883         FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
884         FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
885         FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
886         FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
887         FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
888         FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
889         FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
890         FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
891         FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
892         FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
893         FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
894         FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
895         FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
896         FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
897         FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
898         FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
899         FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
900         FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
901         FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
902         FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
903         FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
904         FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
905         FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
906         FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
907         FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
908         FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
909         FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
910         FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
911         FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
912         FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
913         FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
914         FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
915         FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
916         FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
917         FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
918         FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
919         FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
920 };
921
922 /*
923  * LCB error flags
924  */
925 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
926 static struct flag_table lcb_err_flags[] = {
927 /* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
928 /* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
929 /* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
930 /* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
931                 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
932 /* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
933 /* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
934 /* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
935 /* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
936 /* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
937 /* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
938 /*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
939 /*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
940 /*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
941 /*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
942                 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
943 /*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
944 /*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
945 /*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
946 /*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
947 /*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
948 /*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
949                 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
950 /*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
951 /*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
952 /*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
953 /*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
954 /*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
955 /*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
956 /*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
957                 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
958 /*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
959 /*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
960                 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
961 /*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
962                 LCBE(REDUNDANT_FLIT_PARITY_ERR))
963 };
964
965 /*
966  * DC8051 Error Flags
967  */
968 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
969 static struct flag_table dc8051_err_flags[] = {
970         FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
971         FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
972         FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
973         FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
974         FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
975         FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
976         FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
977         FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
978         FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
979                     D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
980         FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
981 };
982
983 /*
984  * DC8051 Information Error flags
985  *
986  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
987  */
988 static struct flag_table dc8051_info_err_flags[] = {
989         FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
990         FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
991         FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
992         FLAG_ENTRY0("Serdes internal loopback failure",
993                     FAILED_SERDES_INTERNAL_LOOPBACK),
994         FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
995         FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
996         FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
997         FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
998         FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
999         FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1000         FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1001         FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1002         FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1003         FLAG_ENTRY0("External Device Request Timeout",
1004                     EXTERNAL_DEVICE_REQ_TIMEOUT),
1005 };
1006
1007 /*
1008  * DC8051 Information Host Information flags
1009  *
1010  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1011  */
1012 static struct flag_table dc8051_info_host_msg_flags[] = {
1013         FLAG_ENTRY0("Host request done", 0x0001),
1014         FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1015         FLAG_ENTRY0("BC SMA message", 0x0004),
1016         FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1017         FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1018         FLAG_ENTRY0("External device config request", 0x0020),
1019         FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1020         FLAG_ENTRY0("LinkUp achieved", 0x0080),
1021         FLAG_ENTRY0("Link going down", 0x0100),
1022         FLAG_ENTRY0("Link width downgraded", 0x0200),
1023 };
1024
1025 static u32 encoded_size(u32 size);
1026 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1027 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1028 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1029                                u8 *continuous);
1030 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1031                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1032 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1033                                       u8 *remote_tx_rate, u16 *link_widths);
1034 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1035                                     u8 *flag_bits, u16 *link_widths);
1036 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1037                                   u8 *device_rev);
1038 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1039 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1040                             u8 *tx_polarity_inversion,
1041                             u8 *rx_polarity_inversion, u8 *max_rate);
1042 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1043                                 unsigned int context, u64 err_status);
1044 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1045 static void handle_dcc_err(struct hfi1_devdata *dd,
1046                            unsigned int context, u64 err_status);
1047 static void handle_lcb_err(struct hfi1_devdata *dd,
1048                            unsigned int context, u64 err_status);
1049 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1050 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1051 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057 static void set_partition_keys(struct hfi1_pportdata *ppd);
1058 static const char *link_state_name(u32 state);
1059 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1060                                           u32 state);
1061 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1062                            u64 *out_data);
1063 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1064 static int thermal_init(struct hfi1_devdata *dd);
1065
1066 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1067 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1068                                             int msecs);
1069 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070                                   int msecs);
1071 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1072 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1073 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1074                                    int msecs);
1075 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1076                                          int msecs);
1077 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079 static void handle_temp_err(struct hfi1_devdata *dd);
1080 static void dc_shutdown(struct hfi1_devdata *dd);
1081 static void dc_start(struct hfi1_devdata *dd);
1082 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083                            unsigned int *np);
1084 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1088
1089 /*
1090  * Error interrupt table entry.  This is used as input to the interrupt
1091  * "clear down" routine used for all second tier error interrupt registers.
1092  * Second tier interrupt registers have a single bit representing them
1093  * in the top-level CceIntStatus.
1094  */
1095 struct err_reg_info {
1096         u32 status;             /* status CSR offset */
1097         u32 clear;              /* clear CSR offset */
1098         u32 mask;               /* mask CSR offset */
1099         void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1100         const char *desc;
1101 };
1102
1103 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1104 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1105 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
1106
1107 /*
1108  * Helpers for building HFI and DC error interrupt table entries.  Different
1109  * helpers are needed because of inconsistent register names.
1110  */
1111 #define EE(reg, handler, desc) \
1112         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1113                 handler, desc }
1114 #define DC_EE1(reg, handler, desc) \
1115         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1116 #define DC_EE2(reg, handler, desc) \
1117         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1118
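/*
 * Editorial illustration (not part of the driver): assuming the usual CSR
 * name suffixes, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *
 *   { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *     handle_cce_err, "CceErr" }
 *
 * while DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err") expands to
 *
 *   { DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN,
 *     handle_dcc_err, "DCC Err" }
 *
 * i.e. each entry bundles the status/clear/mask CSR offsets with the
 * second-tier handler for that register group.
 */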
1119 /*
1120  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1121  * another register containing more information.
1122  */
1123 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1124 /* 0*/  EE(CCE_ERR,             handle_cce_err,    "CceErr"),
1125 /* 1*/  EE(RCV_ERR,             handle_rxe_err,    "RxeErr"),
1126 /* 2*/  EE(MISC_ERR,            handle_misc_err,   "MiscErr"),
1127 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1128 /* 4*/  EE(SEND_PIO_ERR,        handle_pio_err,    "PioErr"),
1129 /* 5*/  EE(SEND_DMA_ERR,        handle_sdma_err,   "SDmaErr"),
1130 /* 6*/  EE(SEND_EGRESS_ERR,     handle_egress_err, "EgressErr"),
1131 /* 7*/  EE(SEND_ERR,            handle_txe_err,    "TxeErr")
1132         /* the rest are reserved */
1133 };
1134
1135 /*
1136  * Index into the Various section of the interrupt sources
1137  * corresponding to the Critical Temperature interrupt.
1138  */
1139 #define TCRIT_INT_SOURCE 4
1140
1141 /*
1142  * SDMA error interrupt entry - refers to another register containing more
1143  * information.
1144  */
1145 static const struct err_reg_info sdma_eng_err =
1146         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1147
1148 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1149 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1150 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1151 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1152 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1153 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1154         /* rest are reserved */
1155 };
1156
1157 /*
1158  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1159  * register cannot be derived from the MTU value because 10K is not
1160  * a power of 2. Therefore, we need a constant. Everything else can
1161  * be calculated.
1162  */
1163 #define DCC_CFG_PORT_MTU_CAP_10240 7
1164
1165 /*
1166  * Table of the DC grouping of error interrupts.  Each entry refers to
1167  * another register containing more information.
1168  */
1169 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1170 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1171 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1172 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1173 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1174         /* the rest are reserved */
1175 };
1176
1177 struct cntr_entry {
1178         /*
1179          * counter name
1180          */
1181         char *name;
1182
1183         /*
1184          * csr to read for name (if applicable)
1185          */
1186         u64 csr;
1187
1188         /*
1189          * offset into dd or ppd to store the counter's value
1190          */
1191         int offset;
1192
1193         /*
1194          * flags
1195          */
1196         u8 flags;
1197
1198         /*
1199          * accessor for stat element, context either dd or ppd
1200          */
1201         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1202                        int mode, u64 data);
1203 };
1204
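/*
 * Editorial sketch (not part of the driver): a cntr_entry is consumed by
 * calling its accessor with the matching context.  For a device-scoped
 * counter a read looks roughly like
 *
 *   val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 *
 * and zeroing a software counter would pass CNTR_MODE_W with data == 0.
 * The actual call sites are in the counter read/write paths later in
 * this file.
 */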
1205 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1206 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1207
1208 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1209 { \
1210         name, \
1211         csr, \
1212         offset, \
1213         flags, \
1214         accessor \
1215 }
1216
1217 /* 32bit RXE */
1218 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1219 CNTR_ELEM(#name, \
1220           (counter * 8 + RCV_COUNTER_ARRAY32), \
1221           0, flags | CNTR_32BIT, \
1222           port_access_u32_csr)
1223
1224 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1225 CNTR_ELEM(#name, \
1226           (counter * 8 + RCV_COUNTER_ARRAY32), \
1227           0, flags | CNTR_32BIT, \
1228           dev_access_u32_csr)
1229
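/*
 * Editorial illustration (hypothetical counter name): for a 32-bit RXE
 * counter at index N, RXE32_DEV_CNTR_ELEM(RxFoo, N, CNTR_NORMAL) expands
 * through CNTR_ELEM to
 *
 *   { "RxFoo", (N * 8 + RCV_COUNTER_ARRAY32), 0,
 *     CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. the CSR address is computed from the 8-byte-strided counter array
 * base, and the flags/accessor select 32-bit device-level access.
 */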
1230 /* 64bit RXE */
1231 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1232 CNTR_ELEM(#name, \
1233           (counter * 8 + RCV_COUNTER_ARRAY64), \
1234           0, flags, \
1235           port_access_u64_csr)
1236
1237 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239           (counter * 8 + RCV_COUNTER_ARRAY64), \
1240           0, flags, \
1241           dev_access_u64_csr)
1242
1243 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1244 #define OVR_ELM(ctx) \
1245 CNTR_ELEM("RcvHdrOvr" #ctx, \
1246           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1247           0, CNTR_NORMAL, port_access_u64_csr)
1248
1249 /* 32bit TXE */
1250 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1251 CNTR_ELEM(#name, \
1252           (counter * 8 + SEND_COUNTER_ARRAY32), \
1253           0, flags | CNTR_32BIT, \
1254           port_access_u32_csr)
1255
1256 /* 64bit TXE */
1257 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1258 CNTR_ELEM(#name, \
1259           (counter * 8 + SEND_COUNTER_ARRAY64), \
1260           0, flags, \
1261           port_access_u64_csr)
1262
1263 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1264 CNTR_ELEM(#name, \
1265           counter * 8 + SEND_COUNTER_ARRAY64, \
1266           0, \
1267           flags, \
1268           dev_access_u64_csr)
1269
1270 /* CCE */
1271 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1272 CNTR_ELEM(#name, \
1273           (counter * 8 + CCE_COUNTER_ARRAY32), \
1274           0, flags | CNTR_32BIT, \
1275           dev_access_u32_csr)
1276
1277 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1278 CNTR_ELEM(#name, \
1279           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1280           0, flags | CNTR_32BIT, \
1281           dev_access_u32_csr)
1282
1283 /* DC */
1284 #define DC_PERF_CNTR(name, counter, flags) \
1285 CNTR_ELEM(#name, \
1286           counter, \
1287           0, \
1288           flags, \
1289           dev_access_u64_csr)
1290
1291 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1292 CNTR_ELEM(#name, \
1293           counter, \
1294           0, \
1295           flags, \
1296           dc_access_lcb_cntr)
1297
1298 /* ibp counters */
1299 #define SW_IBP_CNTR(name, cntr) \
1300 CNTR_ELEM(#name, \
1301           0, \
1302           0, \
1303           CNTR_SYNTH, \
1304           access_ibp_##cntr)
1305
1306 /**
1307  * hfi1_addr_from_offset - return addr for readq/writeq
1308  * @dd - the dd device
1309  * @offset - the offset of the CSR within bar0
1310  *
1311  * This routine selects the appropriate base address
1312  * based on the indicated offset.
1313  */
1314 static inline void __iomem *hfi1_addr_from_offset(
1315         const struct hfi1_devdata *dd,
1316         u32 offset)
1317 {
1318         if (offset >= dd->base2_start)
1319                 return dd->kregbase2 + (offset - dd->base2_start);
1320         return dd->kregbase1 + offset;
1321 }
1322
1323 /**
1324  * read_csr - read CSR at the indicated offset
1325  * @dd - the dd device
1326  * @offset - the offset of the CSR within bar0
1327  *
1328  * Return: the value read or all FF's if there
1329  * is no mapping
1330  */
1331 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1332 {
1333         if (dd->flags & HFI1_PRESENT)
1334                 return readq(hfi1_addr_from_offset(dd, offset));
1335         return -1;
1336 }
1337
1338 /**
1339  * write_csr - write CSR at the indicated offset
1340  * @dd - the dd device
1341  * @offset - the offset of the CSR within bar0
1342  * @value - value to write
1343  */
1344 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1345 {
1346         if (dd->flags & HFI1_PRESENT) {
1347                 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1348
1349                 /* avoid write to RcvArray */
1350                 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1351                         return;
1352                 writeq(value, base);
1353         }
1354 }
1355
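/*
 * Editorial sketch (not part of the driver): read_csr()/write_csr() are the
 * building blocks for the usual CSR read-modify-write pattern used elsewhere
 * in this file (SOME_CSR/SOME_MASK below are placeholders):
 *
 *   u64 reg = read_csr(dd, SOME_CSR);
 *   reg |= SOME_MASK;
 *   write_csr(dd, SOME_CSR, reg);
 *
 * Both helpers handle the not-present case themselves (write is a no-op,
 * read returns all 1s), so callers need not check HFI1_PRESENT.
 */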
1356 /**
1357  * get_csr_addr - return the iomem address for offset
1358  * @dd - the dd device
1359  * @offset - the offset of the CSR within bar0
1360  *
1361  * Return: The iomem address to use in subsequent
1362  * writeq/readq operations.
1363  */
1364 void __iomem *get_csr_addr(
1365         const struct hfi1_devdata *dd,
1366         u32 offset)
1367 {
1368         if (dd->flags & HFI1_PRESENT)
1369                 return hfi1_addr_from_offset(dd, offset);
1370         return NULL;
1371 }
1372
1373 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1374                                  int mode, u64 value)
1375 {
1376         u64 ret;
1377
1378         if (mode == CNTR_MODE_R) {
1379                 ret = read_csr(dd, csr);
1380         } else if (mode == CNTR_MODE_W) {
1381                 write_csr(dd, csr, value);
1382                 ret = value;
1383         } else {
1384                 dd_dev_err(dd, "Invalid cntr register access mode");
1385                 return 0;
1386         }
1387
1388         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1389         return ret;
1390 }
1391
1392 /* Dev Access */
1393 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1394                               void *context, int vl, int mode, u64 data)
1395 {
1396         struct hfi1_devdata *dd = context;
1397         u64 csr = entry->csr;
1398
1399         if (entry->flags & CNTR_SDMA) {
1400                 if (vl == CNTR_INVALID_VL)
1401                         return 0;
1402                 csr += 0x100 * vl;
1403         } else {
1404                 if (vl != CNTR_INVALID_VL)
1405                         return 0;
1406         }
1407         return read_write_csr(dd, csr, mode, data);
1408 }
1409
1410 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1411                               void *context, int idx, int mode, u64 data)
1412 {
1413         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1414
1415         if (dd->per_sdma && idx < dd->num_sdma)
1416                 return dd->per_sdma[idx].err_cnt;
1417         return 0;
1418 }
1419
1420 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1421                               void *context, int idx, int mode, u64 data)
1422 {
1423         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1424
1425         if (dd->per_sdma && idx < dd->num_sdma)
1426                 return dd->per_sdma[idx].sdma_int_cnt;
1427         return 0;
1428 }
1429
1430 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1431                                    void *context, int idx, int mode, u64 data)
1432 {
1433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1434
1435         if (dd->per_sdma && idx < dd->num_sdma)
1436                 return dd->per_sdma[idx].idle_int_cnt;
1437         return 0;
1438 }
1439
1440 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1441                                        void *context, int idx, int mode,
1442                                        u64 data)
1443 {
1444         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1445
1446         if (dd->per_sdma && idx < dd->num_sdma)
1447                 return dd->per_sdma[idx].progress_int_cnt;
1448         return 0;
1449 }
1450
1451 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1452                               int vl, int mode, u64 data)
1453 {
1454         struct hfi1_devdata *dd = context;
1455
1456         u64 val = 0;
1457         u64 csr = entry->csr;
1458
1459         if (entry->flags & CNTR_VL) {
1460                 if (vl == CNTR_INVALID_VL)
1461                         return 0;
1462                 csr += 8 * vl;
1463         } else {
1464                 if (vl != CNTR_INVALID_VL)
1465                         return 0;
1466         }
1467
1468         val = read_write_csr(dd, csr, mode, data);
1469         return val;
1470 }
1471
1472 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1473                               int vl, int mode, u64 data)
1474 {
1475         struct hfi1_devdata *dd = context;
1476         u32 csr = entry->csr;
1477         int ret = 0;
1478
1479         if (vl != CNTR_INVALID_VL)
1480                 return 0;
1481         if (mode == CNTR_MODE_R)
1482                 ret = read_lcb_csr(dd, csr, &data);
1483         else if (mode == CNTR_MODE_W)
1484                 ret = write_lcb_csr(dd, csr, data);
1485
1486         if (ret) {
1487                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1488                 return 0;
1489         }
1490
1491         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1492         return data;
1493 }
1494
1495 /* Port Access */
1496 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1497                                int vl, int mode, u64 data)
1498 {
1499         struct hfi1_pportdata *ppd = context;
1500
1501         if (vl != CNTR_INVALID_VL)
1502                 return 0;
1503         return read_write_csr(ppd->dd, entry->csr, mode, data);
1504 }
1505
1506 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1507                                void *context, int vl, int mode, u64 data)
1508 {
1509         struct hfi1_pportdata *ppd = context;
1510         u64 val;
1511         u64 csr = entry->csr;
1512
1513         if (entry->flags & CNTR_VL) {
1514                 if (vl == CNTR_INVALID_VL)
1515                         return 0;
1516                 csr += 8 * vl;
1517         } else {
1518                 if (vl != CNTR_INVALID_VL)
1519                         return 0;
1520         }
1521         val = read_write_csr(ppd->dd, csr, mode, data);
1522         return val;
1523 }
1524
1525 /* Software defined */
1526 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1527                                 u64 data)
1528 {
1529         u64 ret;
1530
1531         if (mode == CNTR_MODE_R) {
1532                 ret = *cntr;
1533         } else if (mode == CNTR_MODE_W) {
1534                 *cntr = data;
1535                 ret = data;
1536         } else {
1537                 dd_dev_err(dd, "Invalid cntr sw access mode");
1538                 return 0;
1539         }
1540
1541         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1542
1543         return ret;
1544 }
1545
1546 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1547                                  int vl, int mode, u64 data)
1548 {
1549         struct hfi1_pportdata *ppd = context;
1550
1551         if (vl != CNTR_INVALID_VL)
1552                 return 0;
1553         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1554 }
1555
1556 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1557                                  int vl, int mode, u64 data)
1558 {
1559         struct hfi1_pportdata *ppd = context;
1560
1561         if (vl != CNTR_INVALID_VL)
1562                 return 0;
1563         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1564 }
1565
1566 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1567                                        void *context, int vl, int mode,
1568                                        u64 data)
1569 {
1570         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1571
1572         if (vl != CNTR_INVALID_VL)
1573                 return 0;
1574         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1575 }
1576
1577 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1578                                    void *context, int vl, int mode, u64 data)
1579 {
1580         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1581         u64 zero = 0;
1582         u64 *counter;
1583
1584         if (vl == CNTR_INVALID_VL)
1585                 counter = &ppd->port_xmit_discards;
1586         else if (vl >= 0 && vl < C_VL_COUNT)
1587                 counter = &ppd->port_xmit_discards_vl[vl];
1588         else
1589                 counter = &zero;
1590
1591         return read_write_sw(ppd->dd, counter, mode, data);
1592 }
1593
1594 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1595                                        void *context, int vl, int mode,
1596                                        u64 data)
1597 {
1598         struct hfi1_pportdata *ppd = context;
1599
1600         if (vl != CNTR_INVALID_VL)
1601                 return 0;
1602
1603         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1604                              mode, data);
1605 }
1606
1607 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1608                                       void *context, int vl, int mode, u64 data)
1609 {
1610         struct hfi1_pportdata *ppd = context;
1611
1612         if (vl != CNTR_INVALID_VL)
1613                 return 0;
1614
1615         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1616                              mode, data);
1617 }
1618
1619 u64 get_all_cpu_total(u64 __percpu *cntr)
1620 {
1621         int cpu;
1622         u64 counter = 0;
1623
1624         for_each_possible_cpu(cpu)
1625                 counter += *per_cpu_ptr(cntr, cpu);
1626         return counter;
1627 }
1628
1629 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1630                           u64 __percpu *cntr,
1631                           int vl, int mode, u64 data)
1632 {
1633         u64 ret = 0;
1634
1635         if (vl != CNTR_INVALID_VL)
1636                 return 0;
1637
1638         if (mode == CNTR_MODE_R) {
1639                 ret = get_all_cpu_total(cntr) - *z_val;
1640         } else if (mode == CNTR_MODE_W) {
1641                 /* A write can only zero the counter */
1642                 if (data == 0)
1643                         *z_val = get_all_cpu_total(cntr);
1644                 else
1645                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1646         } else {
1647                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1648                 return 0;
1649         }
1650
1651         return ret;
1652 }
1653
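/*
 * Editorial note (not part of the driver): read_write_cpu() implements
 * "zero by snapshot" for per-CPU counters.  A read reports the per-CPU
 * total minus the saved zero value, and a write of 0 records the current
 * total as the new zero point, e.g.
 *
 *   read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *                  CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *
 * after which subsequent reads start counting from zero again.
 */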
1654 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1655                               void *context, int vl, int mode, u64 data)
1656 {
1657         struct hfi1_devdata *dd = context;
1658
1659         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1660                               mode, data);
1661 }
1662
1663 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1664                                    void *context, int vl, int mode, u64 data)
1665 {
1666         struct hfi1_devdata *dd = context;
1667
1668         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1669                               mode, data);
1670 }
1671
1672 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1673                               void *context, int vl, int mode, u64 data)
1674 {
1675         struct hfi1_devdata *dd = context;
1676
1677         return dd->verbs_dev.n_piowait;
1678 }
1679
1680 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1681                                void *context, int vl, int mode, u64 data)
1682 {
1683         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685         return dd->verbs_dev.n_piodrain;
1686 }
1687
1688 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1689                               void *context, int vl, int mode, u64 data)
1690 {
1691         struct hfi1_devdata *dd = context;
1692
1693         return dd->verbs_dev.n_txwait;
1694 }
1695
1696 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1697                                void *context, int vl, int mode, u64 data)
1698 {
1699         struct hfi1_devdata *dd = context;
1700
1701         return dd->verbs_dev.n_kmem_wait;
1702 }
1703
1704 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1705                                    void *context, int vl, int mode, u64 data)
1706 {
1707         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1708
1709         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1710                               mode, data);
1711 }
1712
1713 /* Software counters for the error status bits within MISC_ERR_STATUS */
1714 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1715                                              void *context, int vl, int mode,
1716                                              u64 data)
1717 {
1718         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1719
1720         return dd->misc_err_status_cnt[12];
1721 }
1722
1723 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1724                                           void *context, int vl, int mode,
1725                                           u64 data)
1726 {
1727         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729         return dd->misc_err_status_cnt[11];
1730 }
1731
1732 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1733                                                void *context, int vl, int mode,
1734                                                u64 data)
1735 {
1736         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1737
1738         return dd->misc_err_status_cnt[10];
1739 }
1740
1741 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1742                                                  void *context, int vl,
1743                                                  int mode, u64 data)
1744 {
1745         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746
1747         return dd->misc_err_status_cnt[9];
1748 }
1749
1750 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1751                                            void *context, int vl, int mode,
1752                                            u64 data)
1753 {
1754         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1755
1756         return dd->misc_err_status_cnt[8];
1757 }
1758
1759 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1760                                 const struct cntr_entry *entry,
1761                                 void *context, int vl, int mode, u64 data)
1762 {
1763         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764
1765         return dd->misc_err_status_cnt[7];
1766 }
1767
1768 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1769                                                 void *context, int vl,
1770                                                 int mode, u64 data)
1771 {
1772         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773
1774         return dd->misc_err_status_cnt[6];
1775 }
1776
1777 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1778                                               void *context, int vl, int mode,
1779                                               u64 data)
1780 {
1781         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782
1783         return dd->misc_err_status_cnt[5];
1784 }
1785
1786 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1787                                             void *context, int vl, int mode,
1788                                             u64 data)
1789 {
1790         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791
1792         return dd->misc_err_status_cnt[4];
1793 }
1794
1795 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1796                                                  void *context, int vl,
1797                                                  int mode, u64 data)
1798 {
1799         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801         return dd->misc_err_status_cnt[3];
1802 }
1803
1804 static u64 access_misc_csr_write_bad_addr_err_cnt(
1805                                 const struct cntr_entry *entry,
1806                                 void *context, int vl, int mode, u64 data)
1807 {
1808         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810         return dd->misc_err_status_cnt[2];
1811 }
1812
1813 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1814                                                  void *context, int vl,
1815                                                  int mode, u64 data)
1816 {
1817         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819         return dd->misc_err_status_cnt[1];
1820 }
1821
1822 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1823                                           void *context, int vl, int mode,
1824                                           u64 data)
1825 {
1826         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828         return dd->misc_err_status_cnt[0];
1829 }
1830
1831 /*
1832  * Software counter for the aggregate of
1833  * individual CceErrStatus counters
1834  */
1835 static u64 access_sw_cce_err_status_aggregated_cnt(
1836                                 const struct cntr_entry *entry,
1837                                 void *context, int vl, int mode, u64 data)
1838 {
1839         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841         return dd->sw_cce_err_status_aggregate;
1842 }
1843
1844 /*
1845  * Software counters corresponding to each of the
1846  * error status bits within CceErrStatus
1847  */
1848 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1849                                               void *context, int vl, int mode,
1850                                               u64 data)
1851 {
1852         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1853
1854         return dd->cce_err_status_cnt[40];
1855 }
1856
1857 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1858                                           void *context, int vl, int mode,
1859                                           u64 data)
1860 {
1861         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862
1863         return dd->cce_err_status_cnt[39];
1864 }
1865
1866 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1867                                           void *context, int vl, int mode,
1868                                           u64 data)
1869 {
1870         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871
1872         return dd->cce_err_status_cnt[38];
1873 }
1874
1875 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1876                                              void *context, int vl, int mode,
1877                                              u64 data)
1878 {
1879         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880
1881         return dd->cce_err_status_cnt[37];
1882 }
1883
1884 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1885                                              void *context, int vl, int mode,
1886                                              u64 data)
1887 {
1888         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889
1890         return dd->cce_err_status_cnt[36];
1891 }
1892
1893 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1894                                 const struct cntr_entry *entry,
1895                                 void *context, int vl, int mode, u64 data)
1896 {
1897         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898
1899         return dd->cce_err_status_cnt[35];
1900 }
1901
1902 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1903                                 const struct cntr_entry *entry,
1904                                 void *context, int vl, int mode, u64 data)
1905 {
1906         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907
1908         return dd->cce_err_status_cnt[34];
1909 }
1910
1911 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1912                                                  void *context, int vl,
1913                                                  int mode, u64 data)
1914 {
1915         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916
1917         return dd->cce_err_status_cnt[33];
1918 }
1919
1920 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1921                                                 void *context, int vl, int mode,
1922                                                 u64 data)
1923 {
1924         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925
1926         return dd->cce_err_status_cnt[32];
1927 }
1928
1929 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1930                                    void *context, int vl, int mode, u64 data)
1931 {
1932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934         return dd->cce_err_status_cnt[31];
1935 }
1936
1937 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1938                                                void *context, int vl, int mode,
1939                                                u64 data)
1940 {
1941         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942
1943         return dd->cce_err_status_cnt[30];
1944 }
1945
1946 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1947                                               void *context, int vl, int mode,
1948                                               u64 data)
1949 {
1950         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952         return dd->cce_err_status_cnt[29];
1953 }
1954
1955 static u64 access_pcic_transmit_back_parity_err_cnt(
1956                                 const struct cntr_entry *entry,
1957                                 void *context, int vl, int mode, u64 data)
1958 {
1959         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961         return dd->cce_err_status_cnt[28];
1962 }
1963
1964 static u64 access_pcic_transmit_front_parity_err_cnt(
1965                                 const struct cntr_entry *entry,
1966                                 void *context, int vl, int mode, u64 data)
1967 {
1968         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970         return dd->cce_err_status_cnt[27];
1971 }
1972
1973 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1974                                              void *context, int vl, int mode,
1975                                              u64 data)
1976 {
1977         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979         return dd->cce_err_status_cnt[26];
1980 }
1981
1982 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1983                                             void *context, int vl, int mode,
1984                                             u64 data)
1985 {
1986         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988         return dd->cce_err_status_cnt[25];
1989 }
1990
1991 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1992                                               void *context, int vl, int mode,
1993                                               u64 data)
1994 {
1995         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997         return dd->cce_err_status_cnt[24];
1998 }
1999
2000 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2001                                              void *context, int vl, int mode,
2002                                              u64 data)
2003 {
2004         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006         return dd->cce_err_status_cnt[23];
2007 }
2008
2009 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2010                                                  void *context, int vl,
2011                                                  int mode, u64 data)
2012 {
2013         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015         return dd->cce_err_status_cnt[22];
2016 }
2017
2018 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2019                                          void *context, int vl, int mode,
2020                                          u64 data)
2021 {
2022         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024         return dd->cce_err_status_cnt[21];
2025 }
2026
2027 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2028                                 const struct cntr_entry *entry,
2029                                 void *context, int vl, int mode, u64 data)
2030 {
2031         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033         return dd->cce_err_status_cnt[20];
2034 }
2035
2036 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2037                                                  void *context, int vl,
2038                                                  int mode, u64 data)
2039 {
2040         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042         return dd->cce_err_status_cnt[19];
2043 }
2044
2045 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2046                                              void *context, int vl, int mode,
2047                                              u64 data)
2048 {
2049         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050
2051         return dd->cce_err_status_cnt[18];
2052 }
2053
2054 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2055                                             void *context, int vl, int mode,
2056                                             u64 data)
2057 {
2058         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059
2060         return dd->cce_err_status_cnt[17];
2061 }
2062
2063 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2064                                               void *context, int vl, int mode,
2065                                               u64 data)
2066 {
2067         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068
2069         return dd->cce_err_status_cnt[16];
2070 }
2071
2072 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2073                                              void *context, int vl, int mode,
2074                                              u64 data)
2075 {
2076         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077
2078         return dd->cce_err_status_cnt[15];
2079 }
2080
2081 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2082                                                  void *context, int vl,
2083                                                  int mode, u64 data)
2084 {
2085         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086
2087         return dd->cce_err_status_cnt[14];
2088 }
2089
2090 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2091                                              void *context, int vl, int mode,
2092                                              u64 data)
2093 {
2094         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095
2096         return dd->cce_err_status_cnt[13];
2097 }
2098
2099 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2100                                 const struct cntr_entry *entry,
2101                                 void *context, int vl, int mode, u64 data)
2102 {
2103         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104
2105         return dd->cce_err_status_cnt[12];
2106 }
2107
2108 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2109                                 const struct cntr_entry *entry,
2110                                 void *context, int vl, int mode, u64 data)
2111 {
2112         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114         return dd->cce_err_status_cnt[11];
2115 }
2116
2117 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2118                                 const struct cntr_entry *entry,
2119                                 void *context, int vl, int mode, u64 data)
2120 {
2121         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122
2123         return dd->cce_err_status_cnt[10];
2124 }
2125
2126 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2127                                 const struct cntr_entry *entry,
2128                                 void *context, int vl, int mode, u64 data)
2129 {
2130         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132         return dd->cce_err_status_cnt[9];
2133 }
2134
2135 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2136                                 const struct cntr_entry *entry,
2137                                 void *context, int vl, int mode, u64 data)
2138 {
2139         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141         return dd->cce_err_status_cnt[8];
2142 }
2143
2144 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2145                                                  void *context, int vl,
2146                                                  int mode, u64 data)
2147 {
2148         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150         return dd->cce_err_status_cnt[7];
2151 }
2152
2153 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2154                                 const struct cntr_entry *entry,
2155                                 void *context, int vl, int mode, u64 data)
2156 {
2157         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159         return dd->cce_err_status_cnt[6];
2160 }
2161
2162 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2163                                                void *context, int vl, int mode,
2164                                                u64 data)
2165 {
2166         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168         return dd->cce_err_status_cnt[5];
2169 }
2170
2171 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2172                                           void *context, int vl, int mode,
2173                                           u64 data)
2174 {
2175         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177         return dd->cce_err_status_cnt[4];
2178 }
2179
2180 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2181                                 const struct cntr_entry *entry,
2182                                 void *context, int vl, int mode, u64 data)
2183 {
2184         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186         return dd->cce_err_status_cnt[3];
2187 }
2188
2189 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2190                                                  void *context, int vl,
2191                                                  int mode, u64 data)
2192 {
2193         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195         return dd->cce_err_status_cnt[2];
2196 }
2197
2198 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2199                                                 void *context, int vl,
2200                                                 int mode, u64 data)
2201 {
2202         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204         return dd->cce_err_status_cnt[1];
2205 }
2206
2207 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2208                                          void *context, int vl, int mode,
2209                                          u64 data)
2210 {
2211         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213         return dd->cce_err_status_cnt[0];
2214 }
2215
2216 /*
2217  * Software counters corresponding to each of the
2218  * error status bits within RcvErrStatus
2219  */
2220 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2221                                         void *context, int vl, int mode,
2222                                         u64 data)
2223 {
2224         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226         return dd->rcv_err_status_cnt[63];
2227 }
2228
2229 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2230                                                 void *context, int vl,
2231                                                 int mode, u64 data)
2232 {
2233         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235         return dd->rcv_err_status_cnt[62];
2236 }
2237
2238 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2239                                                void *context, int vl, int mode,
2240                                                u64 data)
2241 {
2242         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244         return dd->rcv_err_status_cnt[61];
2245 }
2246
2247 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2248                                          void *context, int vl, int mode,
2249                                          u64 data)
2250 {
2251         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253         return dd->rcv_err_status_cnt[60];
2254 }
2255
2256 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2257                                                  void *context, int vl,
2258                                                  int mode, u64 data)
2259 {
2260         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262         return dd->rcv_err_status_cnt[59];
2263 }
2264
2265 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2266                                                  void *context, int vl,
2267                                                  int mode, u64 data)
2268 {
2269         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271         return dd->rcv_err_status_cnt[58];
2272 }
2273
2274 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2275                                             void *context, int vl, int mode,
2276                                             u64 data)
2277 {
2278         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280         return dd->rcv_err_status_cnt[57];
2281 }
2282
2283 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2284                                            void *context, int vl, int mode,
2285                                            u64 data)
2286 {
2287         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289         return dd->rcv_err_status_cnt[56];
2290 }
2291
2292 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2293                                            void *context, int vl, int mode,
2294                                            u64 data)
2295 {
2296         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298         return dd->rcv_err_status_cnt[55];
2299 }
2300
2301 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2302                                 const struct cntr_entry *entry,
2303                                 void *context, int vl, int mode, u64 data)
2304 {
2305         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307         return dd->rcv_err_status_cnt[54];
2308 }
2309
2310 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2311                                 const struct cntr_entry *entry,
2312                                 void *context, int vl, int mode, u64 data)
2313 {
2314         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316         return dd->rcv_err_status_cnt[53];
2317 }
2318
2319 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2320                                                  void *context, int vl,
2321                                                  int mode, u64 data)
2322 {
2323         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325         return dd->rcv_err_status_cnt[52];
2326 }
2327
2328 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2329                                                  void *context, int vl,
2330                                                  int mode, u64 data)
2331 {
2332         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334         return dd->rcv_err_status_cnt[51];
2335 }
2336
2337 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2338                                                  void *context, int vl,
2339                                                  int mode, u64 data)
2340 {
2341         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343         return dd->rcv_err_status_cnt[50];
2344 }
2345
2346 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2347                                                  void *context, int vl,
2348                                                  int mode, u64 data)
2349 {
2350         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352         return dd->rcv_err_status_cnt[49];
2353 }
2354
2355 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2356                                                  void *context, int vl,
2357                                                  int mode, u64 data)
2358 {
2359         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361         return dd->rcv_err_status_cnt[48];
2362 }
2363
2364 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2365                                                  void *context, int vl,
2366                                                  int mode, u64 data)
2367 {
2368         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370         return dd->rcv_err_status_cnt[47];
2371 }
2372
2373 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2374                                          void *context, int vl, int mode,
2375                                          u64 data)
2376 {
2377         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379         return dd->rcv_err_status_cnt[46];
2380 }
2381
2382 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2383                                 const struct cntr_entry *entry,
2384                                 void *context, int vl, int mode, u64 data)
2385 {
2386         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388         return dd->rcv_err_status_cnt[45];
2389 }
2390
2391 static u64 access_rx_lookup_csr_parity_err_cnt(
2392                                 const struct cntr_entry *entry,
2393                                 void *context, int vl, int mode, u64 data)
2394 {
2395         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397         return dd->rcv_err_status_cnt[44];
2398 }
2399
2400 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2401                                 const struct cntr_entry *entry,
2402                                 void *context, int vl, int mode, u64 data)
2403 {
2404         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406         return dd->rcv_err_status_cnt[43];
2407 }
2408
2409 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2410                                 const struct cntr_entry *entry,
2411                                 void *context, int vl, int mode, u64 data)
2412 {
2413         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415         return dd->rcv_err_status_cnt[42];
2416 }
2417
2418 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2419                                 const struct cntr_entry *entry,
2420                                 void *context, int vl, int mode, u64 data)
2421 {
2422         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424         return dd->rcv_err_status_cnt[41];
2425 }
2426
2427 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2428                                 const struct cntr_entry *entry,
2429                                 void *context, int vl, int mode, u64 data)
2430 {
2431         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433         return dd->rcv_err_status_cnt[40];
2434 }
2435
2436 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2437                                 const struct cntr_entry *entry,
2438                                 void *context, int vl, int mode, u64 data)
2439 {
2440         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442         return dd->rcv_err_status_cnt[39];
2443 }
2444
2445 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2446                                 const struct cntr_entry *entry,
2447                                 void *context, int vl, int mode, u64 data)
2448 {
2449         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451         return dd->rcv_err_status_cnt[38];
2452 }
2453
2454 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2455                                 const struct cntr_entry *entry,
2456                                 void *context, int vl, int mode, u64 data)
2457 {
2458         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460         return dd->rcv_err_status_cnt[37];
2461 }
2462
2463 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2464                                 const struct cntr_entry *entry,
2465                                 void *context, int vl, int mode, u64 data)
2466 {
2467         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469         return dd->rcv_err_status_cnt[36];
2470 }
2471
2472 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2473                                 const struct cntr_entry *entry,
2474                                 void *context, int vl, int mode, u64 data)
2475 {
2476         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478         return dd->rcv_err_status_cnt[35];
2479 }
2480
2481 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2482                                 const struct cntr_entry *entry,
2483                                 void *context, int vl, int mode, u64 data)
2484 {
2485         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487         return dd->rcv_err_status_cnt[34];
2488 }
2489
2490 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2491                                 const struct cntr_entry *entry,
2492                                 void *context, int vl, int mode, u64 data)
2493 {
2494         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496         return dd->rcv_err_status_cnt[33];
2497 }
2498
2499 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2500                                         void *context, int vl, int mode,
2501                                         u64 data)
2502 {
2503         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505         return dd->rcv_err_status_cnt[32];
2506 }
2507
2508 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2509                                        void *context, int vl, int mode,
2510                                        u64 data)
2511 {
2512         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514         return dd->rcv_err_status_cnt[31];
2515 }
2516
2517 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2518                                           void *context, int vl, int mode,
2519                                           u64 data)
2520 {
2521         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523         return dd->rcv_err_status_cnt[30];
2524 }
2525
2526 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2527                                              void *context, int vl, int mode,
2528                                              u64 data)
2529 {
2530         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532         return dd->rcv_err_status_cnt[29];
2533 }
2534
2535 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2536                                                  void *context, int vl,
2537                                                  int mode, u64 data)
2538 {
2539         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541         return dd->rcv_err_status_cnt[28];
2542 }
2543
2544 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2545                                 const struct cntr_entry *entry,
2546                                 void *context, int vl, int mode, u64 data)
2547 {
2548         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550         return dd->rcv_err_status_cnt[27];
2551 }
2552
2553 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2554                                 const struct cntr_entry *entry,
2555                                 void *context, int vl, int mode, u64 data)
2556 {
2557         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559         return dd->rcv_err_status_cnt[26];
2560 }
2561
2562 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2563                                 const struct cntr_entry *entry,
2564                                 void *context, int vl, int mode, u64 data)
2565 {
2566         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568         return dd->rcv_err_status_cnt[25];
2569 }
2570
2571 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2572                                 const struct cntr_entry *entry,
2573                                 void *context, int vl, int mode, u64 data)
2574 {
2575         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577         return dd->rcv_err_status_cnt[24];
2578 }
2579
2580 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2581                                 const struct cntr_entry *entry,
2582                                 void *context, int vl, int mode, u64 data)
2583 {
2584         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586         return dd->rcv_err_status_cnt[23];
2587 }
2588
2589 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2590                                 const struct cntr_entry *entry,
2591                                 void *context, int vl, int mode, u64 data)
2592 {
2593         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595         return dd->rcv_err_status_cnt[22];
2596 }
2597
2598 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2599                                 const struct cntr_entry *entry,
2600                                 void *context, int vl, int mode, u64 data)
2601 {
2602         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604         return dd->rcv_err_status_cnt[21];
2605 }
2606
2607 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2608                                 const struct cntr_entry *entry,
2609                                 void *context, int vl, int mode, u64 data)
2610 {
2611         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613         return dd->rcv_err_status_cnt[20];
2614 }
2615
2616 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2617                                 const struct cntr_entry *entry,
2618                                 void *context, int vl, int mode, u64 data)
2619 {
2620         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622         return dd->rcv_err_status_cnt[19];
2623 }
2624
2625 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2626                                                  void *context, int vl,
2627                                                  int mode, u64 data)
2628 {
2629         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630
2631         return dd->rcv_err_status_cnt[18];
2632 }
2633
2634 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2635                                                  void *context, int vl,
2636                                                  int mode, u64 data)
2637 {
2638         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639
2640         return dd->rcv_err_status_cnt[17];
2641 }
2642
2643 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2644                                 const struct cntr_entry *entry,
2645                                 void *context, int vl, int mode, u64 data)
2646 {
2647         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648
2649         return dd->rcv_err_status_cnt[16];
2650 }
2651
2652 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2653                                 const struct cntr_entry *entry,
2654                                 void *context, int vl, int mode, u64 data)
2655 {
2656         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657
2658         return dd->rcv_err_status_cnt[15];
2659 }
2660
2661 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2662                                                 void *context, int vl,
2663                                                 int mode, u64 data)
2664 {
2665         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666
2667         return dd->rcv_err_status_cnt[14];
2668 }
2669
2670 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2671                                                 void *context, int vl,
2672                                                 int mode, u64 data)
2673 {
2674         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675
2676         return dd->rcv_err_status_cnt[13];
2677 }
2678
2679 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2680                                               void *context, int vl, int mode,
2681                                               u64 data)
2682 {
2683         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684
2685         return dd->rcv_err_status_cnt[12];
2686 }
2687
2688 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2689                                           void *context, int vl, int mode,
2690                                           u64 data)
2691 {
2692         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694         return dd->rcv_err_status_cnt[11];
2695 }
2696
2697 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2698                                           void *context, int vl, int mode,
2699                                           u64 data)
2700 {
2701         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702
2703         return dd->rcv_err_status_cnt[10];
2704 }
2705
2706 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2707                                                void *context, int vl, int mode,
2708                                                u64 data)
2709 {
2710         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712         return dd->rcv_err_status_cnt[9];
2713 }
2714
2715 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2716                                             void *context, int vl, int mode,
2717                                             u64 data)
2718 {
2719         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721         return dd->rcv_err_status_cnt[8];
2722 }
2723
2724 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2725                                 const struct cntr_entry *entry,
2726                                 void *context, int vl, int mode, u64 data)
2727 {
2728         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730         return dd->rcv_err_status_cnt[7];
2731 }
2732
2733 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2734                                 const struct cntr_entry *entry,
2735                                 void *context, int vl, int mode, u64 data)
2736 {
2737         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739         return dd->rcv_err_status_cnt[6];
2740 }
2741
2742 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2743                                           void *context, int vl, int mode,
2744                                           u64 data)
2745 {
2746         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748         return dd->rcv_err_status_cnt[5];
2749 }
2750
2751 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2752                                           void *context, int vl, int mode,
2753                                           u64 data)
2754 {
2755         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757         return dd->rcv_err_status_cnt[4];
2758 }
2759
2760 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2761                                          void *context, int vl, int mode,
2762                                          u64 data)
2763 {
2764         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766         return dd->rcv_err_status_cnt[3];
2767 }
2768
2769 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2770                                          void *context, int vl, int mode,
2771                                          u64 data)
2772 {
2773         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775         return dd->rcv_err_status_cnt[2];
2776 }
2777
2778 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2779                                             void *context, int vl, int mode,
2780                                             u64 data)
2781 {
2782         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784         return dd->rcv_err_status_cnt[1];
2785 }
2786
2787 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2788                                          void *context, int vl, int mode,
2789                                          u64 data)
2790 {
2791         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793         return dd->rcv_err_status_cnt[0];
2794 }
2795
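/*
 * All of the access_*_err_cnt() accessors above and below follow the
 * same pattern: the entry/vl/mode/data arguments are unused and the
 * current value of one software shadow counter is returned.  The array
 * index appears to track the bit position of the error within the
 * corresponding hardware error-status register (e.g.
 * rcv_err_status_cnt[0] above presumably shadows bit 0,
 * RxDmaCsrCorErr); the counters themselves are incremented by the
 * error-interrupt handlers elsewhere in this file.
 *
 * A read through the counter framework amounts to, roughly:
 *
 *      val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 *
 * (sketch only; rw_cntr and CNTR_INVALID_VL are assumed from chip.h and
 * do not appear in this excerpt.)
 */
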
2796 /*
2797  * Software counters corresponding to each of the
2798  * error status bits within SendPioErrStatus
2799  */
2800 static u64 access_pio_pec_sop_head_parity_err_cnt(
2801                                 const struct cntr_entry *entry,
2802                                 void *context, int vl, int mode, u64 data)
2803 {
2804         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806         return dd->send_pio_err_status_cnt[35];
2807 }
2808
2809 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2810                                 const struct cntr_entry *entry,
2811                                 void *context, int vl, int mode, u64 data)
2812 {
2813         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815         return dd->send_pio_err_status_cnt[34];
2816 }
2817
2818 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2819                                 const struct cntr_entry *entry,
2820                                 void *context, int vl, int mode, u64 data)
2821 {
2822         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824         return dd->send_pio_err_status_cnt[33];
2825 }
2826
2827 static u64 access_pio_current_free_cnt_parity_err_cnt(
2828                                 const struct cntr_entry *entry,
2829                                 void *context, int vl, int mode, u64 data)
2830 {
2831         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833         return dd->send_pio_err_status_cnt[32];
2834 }
2835
2836 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2837                                           void *context, int vl, int mode,
2838                                           u64 data)
2839 {
2840         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842         return dd->send_pio_err_status_cnt[31];
2843 }
2844
2845 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2846                                           void *context, int vl, int mode,
2847                                           u64 data)
2848 {
2849         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851         return dd->send_pio_err_status_cnt[30];
2852 }
2853
2854 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2855                                            void *context, int vl, int mode,
2856                                            u64 data)
2857 {
2858         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860         return dd->send_pio_err_status_cnt[29];
2861 }
2862
2863 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2864                                 const struct cntr_entry *entry,
2865                                 void *context, int vl, int mode, u64 data)
2866 {
2867         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869         return dd->send_pio_err_status_cnt[28];
2870 }
2871
2872 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2873                                              void *context, int vl, int mode,
2874                                              u64 data)
2875 {
2876         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878         return dd->send_pio_err_status_cnt[27];
2879 }
2880
2881 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2882                                              void *context, int vl, int mode,
2883                                              u64 data)
2884 {
2885         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887         return dd->send_pio_err_status_cnt[26];
2888 }
2889
2890 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2891                                                 void *context, int vl,
2892                                                 int mode, u64 data)
2893 {
2894         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896         return dd->send_pio_err_status_cnt[25];
2897 }
2898
2899 static u64 access_pio_block_qw_count_parity_err_cnt(
2900                                 const struct cntr_entry *entry,
2901                                 void *context, int vl, int mode, u64 data)
2902 {
2903         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905         return dd->send_pio_err_status_cnt[24];
2906 }
2907
2908 static u64 access_pio_write_qw_valid_parity_err_cnt(
2909                                 const struct cntr_entry *entry,
2910                                 void *context, int vl, int mode, u64 data)
2911 {
2912         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914         return dd->send_pio_err_status_cnt[23];
2915 }
2916
2917 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2918                                             void *context, int vl, int mode,
2919                                             u64 data)
2920 {
2921         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923         return dd->send_pio_err_status_cnt[22];
2924 }
2925
2926 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2927                                                 void *context, int vl,
2928                                                 int mode, u64 data)
2929 {
2930         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932         return dd->send_pio_err_status_cnt[21];
2933 }
2934
2935 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2936                                                 void *context, int vl,
2937                                                 int mode, u64 data)
2938 {
2939         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941         return dd->send_pio_err_status_cnt[20];
2942 }
2943
2944 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2945                                                 void *context, int vl,
2946                                                 int mode, u64 data)
2947 {
2948         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950         return dd->send_pio_err_status_cnt[19];
2951 }
2952
2953 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2954                                 const struct cntr_entry *entry,
2955                                 void *context, int vl, int mode, u64 data)
2956 {
2957         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958
2959         return dd->send_pio_err_status_cnt[18];
2960 }
2961
2962 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2963                                          void *context, int vl, int mode,
2964                                          u64 data)
2965 {
2966         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967
2968         return dd->send_pio_err_status_cnt[17];
2969 }
2970
2971 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2972                                             void *context, int vl, int mode,
2973                                             u64 data)
2974 {
2975         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976
2977         return dd->send_pio_err_status_cnt[16];
2978 }
2979
2980 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2981                                 const struct cntr_entry *entry,
2982                                 void *context, int vl, int mode, u64 data)
2983 {
2984         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985
2986         return dd->send_pio_err_status_cnt[15];
2987 }
2988
2989 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2990                                 const struct cntr_entry *entry,
2991                                 void *context, int vl, int mode, u64 data)
2992 {
2993         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994
2995         return dd->send_pio_err_status_cnt[14];
2996 }
2997
2998 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2999                                 const struct cntr_entry *entry,
3000                                 void *context, int vl, int mode, u64 data)
3001 {
3002         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003
3004         return dd->send_pio_err_status_cnt[13];
3005 }
3006
3007 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3008                                 const struct cntr_entry *entry,
3009                                 void *context, int vl, int mode, u64 data)
3010 {
3011         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012
3013         return dd->send_pio_err_status_cnt[12];
3014 }
3015
3016 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3017                                 const struct cntr_entry *entry,
3018                                 void *context, int vl, int mode, u64 data)
3019 {
3020         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022         return dd->send_pio_err_status_cnt[11];
3023 }
3024
3025 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3026                                 const struct cntr_entry *entry,
3027                                 void *context, int vl, int mode, u64 data)
3028 {
3029         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030
3031         return dd->send_pio_err_status_cnt[10];
3032 }
3033
3034 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3035                                 const struct cntr_entry *entry,
3036                                 void *context, int vl, int mode, u64 data)
3037 {
3038         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040         return dd->send_pio_err_status_cnt[9];
3041 }
3042
3043 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3044                                 const struct cntr_entry *entry,
3045                                 void *context, int vl, int mode, u64 data)
3046 {
3047         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049         return dd->send_pio_err_status_cnt[8];
3050 }
3051
3052 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3053                                 const struct cntr_entry *entry,
3054                                 void *context, int vl, int mode, u64 data)
3055 {
3056         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058         return dd->send_pio_err_status_cnt[7];
3059 }
3060
3061 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3062                                               void *context, int vl, int mode,
3063                                               u64 data)
3064 {
3065         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067         return dd->send_pio_err_status_cnt[6];
3068 }
3069
3070 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3071                                               void *context, int vl, int mode,
3072                                               u64 data)
3073 {
3074         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075
3076         return dd->send_pio_err_status_cnt[5];
3077 }
3078
3079 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3080                                            void *context, int vl, int mode,
3081                                            u64 data)
3082 {
3083         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084
3085         return dd->send_pio_err_status_cnt[4];
3086 }
3087
3088 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3089                                            void *context, int vl, int mode,
3090                                            u64 data)
3091 {
3092         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093
3094         return dd->send_pio_err_status_cnt[3];
3095 }
3096
3097 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3098                                          void *context, int vl, int mode,
3099                                          u64 data)
3100 {
3101         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102
3103         return dd->send_pio_err_status_cnt[2];
3104 }
3105
3106 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3107                                                 void *context, int vl,
3108                                                 int mode, u64 data)
3109 {
3110         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111
3112         return dd->send_pio_err_status_cnt[1];
3113 }
3114
3115 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3116                                              void *context, int vl, int mode,
3117                                              u64 data)
3118 {
3119         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121         return dd->send_pio_err_status_cnt[0];
3122 }
3123
3124 /*
3125  * Software counters corresponding to each of the
3126  * error status bits within SendDmaErrStatus
3127  */
3128 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3129                                 const struct cntr_entry *entry,
3130                                 void *context, int vl, int mode, u64 data)
3131 {
3132         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134         return dd->send_dma_err_status_cnt[3];
3135 }
3136
3137 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3138                                 const struct cntr_entry *entry,
3139                                 void *context, int vl, int mode, u64 data)
3140 {
3141         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143         return dd->send_dma_err_status_cnt[2];
3144 }
3145
3146 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3147                                           void *context, int vl, int mode,
3148                                           u64 data)
3149 {
3150         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152         return dd->send_dma_err_status_cnt[1];
3153 }
3154
3155 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3156                                        void *context, int vl, int mode,
3157                                        u64 data)
3158 {
3159         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161         return dd->send_dma_err_status_cnt[0];
3162 }
3163
3164 /*
3165  * Software counters corresponding to each of the
3166  * error status bits within SendEgressErrStatus
3167  */
3168 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3169                                 const struct cntr_entry *entry,
3170                                 void *context, int vl, int mode, u64 data)
3171 {
3172         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174         return dd->send_egress_err_status_cnt[63];
3175 }
3176
3177 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3178                                 const struct cntr_entry *entry,
3179                                 void *context, int vl, int mode, u64 data)
3180 {
3181         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183         return dd->send_egress_err_status_cnt[62];
3184 }
3185
3186 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3187                                              void *context, int vl, int mode,
3188                                              u64 data)
3189 {
3190         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192         return dd->send_egress_err_status_cnt[61];
3193 }
3194
3195 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3196                                                  void *context, int vl,
3197                                                  int mode, u64 data)
3198 {
3199         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201         return dd->send_egress_err_status_cnt[60];
3202 }
3203
3204 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3205                                 const struct cntr_entry *entry,
3206                                 void *context, int vl, int mode, u64 data)
3207 {
3208         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210         return dd->send_egress_err_status_cnt[59];
3211 }
3212
3213 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3214                                         void *context, int vl, int mode,
3215                                         u64 data)
3216 {
3217         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219         return dd->send_egress_err_status_cnt[58];
3220 }
3221
3222 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3223                                             void *context, int vl, int mode,
3224                                             u64 data)
3225 {
3226         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228         return dd->send_egress_err_status_cnt[57];
3229 }
3230
3231 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3232                                               void *context, int vl, int mode,
3233                                               u64 data)
3234 {
3235         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237         return dd->send_egress_err_status_cnt[56];
3238 }
3239
3240 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3241                                               void *context, int vl, int mode,
3242                                               u64 data)
3243 {
3244         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246         return dd->send_egress_err_status_cnt[55];
3247 }
3248
3249 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3250                                               void *context, int vl, int mode,
3251                                               u64 data)
3252 {
3253         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255         return dd->send_egress_err_status_cnt[54];
3256 }
3257
3258 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3259                                               void *context, int vl, int mode,
3260                                               u64 data)
3261 {
3262         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264         return dd->send_egress_err_status_cnt[53];
3265 }
3266
3267 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3268                                               void *context, int vl, int mode,
3269                                               u64 data)
3270 {
3271         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273         return dd->send_egress_err_status_cnt[52];
3274 }
3275
3276 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3277                                               void *context, int vl, int mode,
3278                                               u64 data)
3279 {
3280         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282         return dd->send_egress_err_status_cnt[51];
3283 }
3284
3285 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3286                                               void *context, int vl, int mode,
3287                                               u64 data)
3288 {
3289         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291         return dd->send_egress_err_status_cnt[50];
3292 }
3293
3294 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3295                                               void *context, int vl, int mode,
3296                                               u64 data)
3297 {
3298         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300         return dd->send_egress_err_status_cnt[49];
3301 }
3302
3303 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3304                                               void *context, int vl, int mode,
3305                                               u64 data)
3306 {
3307         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309         return dd->send_egress_err_status_cnt[48];
3310 }
3311
3312 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3313                                               void *context, int vl, int mode,
3314                                               u64 data)
3315 {
3316         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318         return dd->send_egress_err_status_cnt[47];
3319 }
3320
3321 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3322                                             void *context, int vl, int mode,
3323                                             u64 data)
3324 {
3325         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327         return dd->send_egress_err_status_cnt[46];
3328 }
3329
3330 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3331                                              void *context, int vl, int mode,
3332                                              u64 data)
3333 {
3334         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336         return dd->send_egress_err_status_cnt[45];
3337 }
3338
3339 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3340                                                  void *context, int vl,
3341                                                  int mode, u64 data)
3342 {
3343         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345         return dd->send_egress_err_status_cnt[44];
3346 }
3347
3348 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3349                                 const struct cntr_entry *entry,
3350                                 void *context, int vl, int mode, u64 data)
3351 {
3352         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354         return dd->send_egress_err_status_cnt[43];
3355 }
3356
3357 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3358                                         void *context, int vl, int mode,
3359                                         u64 data)
3360 {
3361         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363         return dd->send_egress_err_status_cnt[42];
3364 }
3365
3366 static u64 access_tx_credit_return_partiy_err_cnt(
3367                                 const struct cntr_entry *entry,
3368                                 void *context, int vl, int mode, u64 data)
3369 {
3370         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372         return dd->send_egress_err_status_cnt[41];
3373 }
3374
3375 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3376                                 const struct cntr_entry *entry,
3377                                 void *context, int vl, int mode, u64 data)
3378 {
3379         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381         return dd->send_egress_err_status_cnt[40];
3382 }
3383
3384 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3385                                 const struct cntr_entry *entry,
3386                                 void *context, int vl, int mode, u64 data)
3387 {
3388         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390         return dd->send_egress_err_status_cnt[39];
3391 }
3392
3393 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3394                                 const struct cntr_entry *entry,
3395                                 void *context, int vl, int mode, u64 data)
3396 {
3397         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399         return dd->send_egress_err_status_cnt[38];
3400 }
3401
3402 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3403                                 const struct cntr_entry *entry,
3404                                 void *context, int vl, int mode, u64 data)
3405 {
3406         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408         return dd->send_egress_err_status_cnt[37];
3409 }
3410
3411 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3412                                 const struct cntr_entry *entry,
3413                                 void *context, int vl, int mode, u64 data)
3414 {
3415         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417         return dd->send_egress_err_status_cnt[36];
3418 }
3419
3420 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3421                                 const struct cntr_entry *entry,
3422                                 void *context, int vl, int mode, u64 data)
3423 {
3424         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426         return dd->send_egress_err_status_cnt[35];
3427 }
3428
3429 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3430                                 const struct cntr_entry *entry,
3431                                 void *context, int vl, int mode, u64 data)
3432 {
3433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435         return dd->send_egress_err_status_cnt[34];
3436 }
3437
3438 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3439                                 const struct cntr_entry *entry,
3440                                 void *context, int vl, int mode, u64 data)
3441 {
3442         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444         return dd->send_egress_err_status_cnt[33];
3445 }
3446
3447 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3448                                 const struct cntr_entry *entry,
3449                                 void *context, int vl, int mode, u64 data)
3450 {
3451         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453         return dd->send_egress_err_status_cnt[32];
3454 }
3455
3456 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3457                                 const struct cntr_entry *entry,
3458                                 void *context, int vl, int mode, u64 data)
3459 {
3460         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462         return dd->send_egress_err_status_cnt[31];
3463 }
3464
3465 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3466                                 const struct cntr_entry *entry,
3467                                 void *context, int vl, int mode, u64 data)
3468 {
3469         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471         return dd->send_egress_err_status_cnt[30];
3472 }
3473
3474 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3475                                 const struct cntr_entry *entry,
3476                                 void *context, int vl, int mode, u64 data)
3477 {
3478         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480         return dd->send_egress_err_status_cnt[29];
3481 }
3482
3483 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3484                                 const struct cntr_entry *entry,
3485                                 void *context, int vl, int mode, u64 data)
3486 {
3487         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489         return dd->send_egress_err_status_cnt[28];
3490 }
3491
3492 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3493                                 const struct cntr_entry *entry,
3494                                 void *context, int vl, int mode, u64 data)
3495 {
3496         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498         return dd->send_egress_err_status_cnt[27];
3499 }
3500
3501 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3502                                 const struct cntr_entry *entry,
3503                                 void *context, int vl, int mode, u64 data)
3504 {
3505         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507         return dd->send_egress_err_status_cnt[26];
3508 }
3509
3510 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3511                                 const struct cntr_entry *entry,
3512                                 void *context, int vl, int mode, u64 data)
3513 {
3514         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516         return dd->send_egress_err_status_cnt[25];
3517 }
3518
3519 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3520                                 const struct cntr_entry *entry,
3521                                 void *context, int vl, int mode, u64 data)
3522 {
3523         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525         return dd->send_egress_err_status_cnt[24];
3526 }
3527
3528 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3529                                 const struct cntr_entry *entry,
3530                                 void *context, int vl, int mode, u64 data)
3531 {
3532         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534         return dd->send_egress_err_status_cnt[23];
3535 }
3536
3537 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3538                                 const struct cntr_entry *entry,
3539                                 void *context, int vl, int mode, u64 data)
3540 {
3541         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543         return dd->send_egress_err_status_cnt[22];
3544 }
3545
3546 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3547                                 const struct cntr_entry *entry,
3548                                 void *context, int vl, int mode, u64 data)
3549 {
3550         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552         return dd->send_egress_err_status_cnt[21];
3553 }
3554
3555 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3556                                 const struct cntr_entry *entry,
3557                                 void *context, int vl, int mode, u64 data)
3558 {
3559         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561         return dd->send_egress_err_status_cnt[20];
3562 }
3563
3564 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3565                                 const struct cntr_entry *entry,
3566                                 void *context, int vl, int mode, u64 data)
3567 {
3568         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570         return dd->send_egress_err_status_cnt[19];
3571 }
3572
3573 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3574                                 const struct cntr_entry *entry,
3575                                 void *context, int vl, int mode, u64 data)
3576 {
3577         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578
3579         return dd->send_egress_err_status_cnt[18];
3580 }
3581
3582 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3583                                 const struct cntr_entry *entry,
3584                                 void *context, int vl, int mode, u64 data)
3585 {
3586         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587
3588         return dd->send_egress_err_status_cnt[17];
3589 }
3590
3591 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3592                                 const struct cntr_entry *entry,
3593                                 void *context, int vl, int mode, u64 data)
3594 {
3595         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596
3597         return dd->send_egress_err_status_cnt[16];
3598 }
3599
3600 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3601                                            void *context, int vl, int mode,
3602                                            u64 data)
3603 {
3604         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605
3606         return dd->send_egress_err_status_cnt[15];
3607 }
3608
3609 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3610                                                  void *context, int vl,
3611                                                  int mode, u64 data)
3612 {
3613         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614
3615         return dd->send_egress_err_status_cnt[14];
3616 }
3617
3618 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3619                                                void *context, int vl, int mode,
3620                                                u64 data)
3621 {
3622         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623
3624         return dd->send_egress_err_status_cnt[13];
3625 }
3626
3627 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3628                                         void *context, int vl, int mode,
3629                                         u64 data)
3630 {
3631         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632
3633         return dd->send_egress_err_status_cnt[12];
3634 }
3635
3636 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3637                                 const struct cntr_entry *entry,
3638                                 void *context, int vl, int mode, u64 data)
3639 {
3640         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642         return dd->send_egress_err_status_cnt[11];
3643 }
3644
3645 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3646                                              void *context, int vl, int mode,
3647                                              u64 data)
3648 {
3649         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650
3651         return dd->send_egress_err_status_cnt[10];
3652 }
3653
3654 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3655                                             void *context, int vl, int mode,
3656                                             u64 data)
3657 {
3658         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660         return dd->send_egress_err_status_cnt[9];
3661 }
3662
3663 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3664                                 const struct cntr_entry *entry,
3665                                 void *context, int vl, int mode, u64 data)
3666 {
3667         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669         return dd->send_egress_err_status_cnt[8];
3670 }
3671
3672 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3673                                 const struct cntr_entry *entry,
3674                                 void *context, int vl, int mode, u64 data)
3675 {
3676         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678         return dd->send_egress_err_status_cnt[7];
3679 }
3680
3681 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3682                                             void *context, int vl, int mode,
3683                                             u64 data)
3684 {
3685         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686
3687         return dd->send_egress_err_status_cnt[6];
3688 }
3689
3690 static u64 access_tx_incorrect_link_state_err_cnt(
3691                                 const struct cntr_entry *entry,
3692                                 void *context, int vl, int mode, u64 data)
3693 {
3694         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695
3696         return dd->send_egress_err_status_cnt[5];
3697 }
3698
3699 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3700                                       void *context, int vl, int mode,
3701                                       u64 data)
3702 {
3703         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704
3705         return dd->send_egress_err_status_cnt[4];
3706 }
3707
3708 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3709                                 const struct cntr_entry *entry,
3710                                 void *context, int vl, int mode, u64 data)
3711 {
3712         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713
3714         return dd->send_egress_err_status_cnt[3];
3715 }
3716
3717 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3718                                             void *context, int vl, int mode,
3719                                             u64 data)
3720 {
3721         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723         return dd->send_egress_err_status_cnt[2];
3724 }
3725
3726 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3727                                 const struct cntr_entry *entry,
3728                                 void *context, int vl, int mode, u64 data)
3729 {
3730         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732         return dd->send_egress_err_status_cnt[1];
3733 }
3734
3735 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3736                                 const struct cntr_entry *entry,
3737                                 void *context, int vl, int mode, u64 data)
3738 {
3739         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741         return dd->send_egress_err_status_cnt[0];
3742 }
3743
3744 /*
3745  * Software counters corresponding to each of the
3746  * error status bits within SendErrStatus
3747  */
3748 static u64 access_send_csr_write_bad_addr_err_cnt(
3749                                 const struct cntr_entry *entry,
3750                                 void *context, int vl, int mode, u64 data)
3751 {
3752         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753
3754         return dd->send_err_status_cnt[2];
3755 }
3756
3757 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3758                                                  void *context, int vl,
3759                                                  int mode, u64 data)
3760 {
3761         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762
3763         return dd->send_err_status_cnt[1];
3764 }
3765
3766 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3767                                       void *context, int vl, int mode,
3768                                       u64 data)
3769 {
3770         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771
3772         return dd->send_err_status_cnt[0];
3773 }
3774
3775 /*
3776  * Software counters corresponding to each of the
3777  * error status bits within SendCtxtErrStatus
3778  */
3779 static u64 access_pio_write_out_of_bounds_err_cnt(
3780                                 const struct cntr_entry *entry,
3781                                 void *context, int vl, int mode, u64 data)
3782 {
3783         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785         return dd->sw_ctxt_err_status_cnt[4];
3786 }
3787
3788 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3789                                              void *context, int vl, int mode,
3790                                              u64 data)
3791 {
3792         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794         return dd->sw_ctxt_err_status_cnt[3];
3795 }
3796
3797 static u64 access_pio_write_crosses_boundary_err_cnt(
3798                                 const struct cntr_entry *entry,
3799                                 void *context, int vl, int mode, u64 data)
3800 {
3801         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803         return dd->sw_ctxt_err_status_cnt[2];
3804 }
3805
3806 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3807                                                 void *context, int vl,
3808                                                 int mode, u64 data)
3809 {
3810         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812         return dd->sw_ctxt_err_status_cnt[1];
3813 }
3814
3815 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3816                                                void *context, int vl, int mode,
3817                                                u64 data)
3818 {
3819         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821         return dd->sw_ctxt_err_status_cnt[0];
3822 }
3823
3824 /*
3825  * Software counters corresponding to each of the
3826  * error status bits within SendDmaEngErrStatus
3827  */
3828 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3829                                 const struct cntr_entry *entry,
3830                                 void *context, int vl, int mode, u64 data)
3831 {
3832         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833
3834         return dd->sw_send_dma_eng_err_status_cnt[23];
3835 }
3836
3837 static u64 access_sdma_header_storage_cor_err_cnt(
3838                                 const struct cntr_entry *entry,
3839                                 void *context, int vl, int mode, u64 data)
3840 {
3841         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842
3843         return dd->sw_send_dma_eng_err_status_cnt[22];
3844 }
3845
3846 static u64 access_sdma_packet_tracking_cor_err_cnt(
3847                                 const struct cntr_entry *entry,
3848                                 void *context, int vl, int mode, u64 data)
3849 {
3850         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851
3852         return dd->sw_send_dma_eng_err_status_cnt[21];
3853 }
3854
3855 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3856                                             void *context, int vl, int mode,
3857                                             u64 data)
3858 {
3859         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860
3861         return dd->sw_send_dma_eng_err_status_cnt[20];
3862 }
3863
3864 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3865                                               void *context, int vl, int mode,
3866                                               u64 data)
3867 {
3868         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869
3870         return dd->sw_send_dma_eng_err_status_cnt[19];
3871 }
3872
3873 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3874                                 const struct cntr_entry *entry,
3875                                 void *context, int vl, int mode, u64 data)
3876 {
3877         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878
3879         return dd->sw_send_dma_eng_err_status_cnt[18];
3880 }
3881
3882 static u64 access_sdma_header_storage_unc_err_cnt(
3883                                 const struct cntr_entry *entry,
3884                                 void *context, int vl, int mode, u64 data)
3885 {
3886         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887
3888         return dd->sw_send_dma_eng_err_status_cnt[17];
3889 }
3890
3891 static u64 access_sdma_packet_tracking_unc_err_cnt(
3892                                 const struct cntr_entry *entry,
3893                                 void *context, int vl, int mode, u64 data)
3894 {
3895         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896
3897         return dd->sw_send_dma_eng_err_status_cnt[16];
3898 }
3899
3900 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3901                                             void *context, int vl, int mode,
3902                                             u64 data)
3903 {
3904         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905
3906         return dd->sw_send_dma_eng_err_status_cnt[15];
3907 }
3908
3909 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3910                                               void *context, int vl, int mode,
3911                                               u64 data)
3912 {
3913         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914
3915         return dd->sw_send_dma_eng_err_status_cnt[14];
3916 }
3917
3918 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3919                                        void *context, int vl, int mode,
3920                                        u64 data)
3921 {
3922         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923
3924         return dd->sw_send_dma_eng_err_status_cnt[13];
3925 }
3926
3927 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3928                                              void *context, int vl, int mode,
3929                                              u64 data)
3930 {
3931         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933         return dd->sw_send_dma_eng_err_status_cnt[12];
3934 }
3935
3936 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3937                                               void *context, int vl, int mode,
3938                                               u64 data)
3939 {
3940         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942         return dd->sw_send_dma_eng_err_status_cnt[11];
3943 }
3944
3945 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3946                                              void *context, int vl, int mode,
3947                                              u64 data)
3948 {
3949         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950
3951         return dd->sw_send_dma_eng_err_status_cnt[10];
3952 }
3953
3954 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3955                                           void *context, int vl, int mode,
3956                                           u64 data)
3957 {
3958         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959
3960         return dd->sw_send_dma_eng_err_status_cnt[9];
3961 }
3962
3963 static u64 access_sdma_packet_desc_overflow_err_cnt(
3964                                 const struct cntr_entry *entry,
3965                                 void *context, int vl, int mode, u64 data)
3966 {
3967         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3968
3969         return dd->sw_send_dma_eng_err_status_cnt[8];
3970 }
3971
3972 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3973                                                void *context, int vl,
3974                                                int mode, u64 data)
3975 {
3976         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3977
3978         return dd->sw_send_dma_eng_err_status_cnt[7];
3979 }
3980
3981 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3982                                     void *context, int vl, int mode, u64 data)
3983 {
3984         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3985
3986         return dd->sw_send_dma_eng_err_status_cnt[6];
3987 }
3988
3989 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3990                                         void *context, int vl, int mode,
3991                                         u64 data)
3992 {
3993         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3994
3995         return dd->sw_send_dma_eng_err_status_cnt[5];
3996 }
3997
3998 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3999                                           void *context, int vl, int mode,
4000                                           u64 data)
4001 {
4002         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4003
4004         return dd->sw_send_dma_eng_err_status_cnt[4];
4005 }
4006
4007 static u64 access_sdma_tail_out_of_bounds_err_cnt(
4008                                 const struct cntr_entry *entry,
4009                                 void *context, int vl, int mode, u64 data)
4010 {
4011         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4012
4013         return dd->sw_send_dma_eng_err_status_cnt[3];
4014 }
4015
4016 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4017                                         void *context, int vl, int mode,
4018                                         u64 data)
4019 {
4020         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4021
4022         return dd->sw_send_dma_eng_err_status_cnt[2];
4023 }
4024
4025 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4026                                             void *context, int vl, int mode,
4027                                             u64 data)
4028 {
4029         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4030
4031         return dd->sw_send_dma_eng_err_status_cnt[1];
4032 }
4033
4034 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4035                                         void *context, int vl, int mode,
4036                                         u64 data)
4037 {
4038         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4039
4040         return dd->sw_send_dma_eng_err_status_cnt[0];
4041 }
4042
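/*
 * DcRecvErr is a synthesized counter: on a read, the value from the
 * DCC_ERR_PORTRCV_ERR_CNT CSR is combined with the software-counted bypass
 * packet errors and clamped at CNTR_MAX; on a write (counter reset), the
 * software portion is zeroed as well.
 */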
4043 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4044                                  void *context, int vl, int mode,
4045                                  u64 data)
4046 {
4047         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4048
4049         u64 val = 0;
4050         u64 csr = entry->csr;
4051
4052         val = read_write_csr(dd, csr, mode, data);
4053         if (mode == CNTR_MODE_R) {
4054                 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4055                         CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4056         } else if (mode == CNTR_MODE_W) {
4057                 dd->sw_rcv_bypass_packet_errors = 0;
4058         } else {
4059                 dd_dev_err(dd, "Invalid cntr register access mode");
4060                 return 0;
4061         }
4062         return val;
4063 }
4064
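/*
 * Generate accessors for per-CPU software port counters kept in
 * ppd->ibport_data.rvp.  read_write_cpu() folds the per-CPU values together
 * relative to the saved baseline z_<cntr>.
 */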
4065 #define def_access_sw_cpu(cntr) \
4066 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
4067                               void *context, int vl, int mode, u64 data)      \
4068 {                                                                             \
4069         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4070         return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
4071                               ppd->ibport_data.rvp.cntr, vl,                  \
4072                               mode, data);                                    \
4073 }
4074
4075 def_access_sw_cpu(rc_acks);
4076 def_access_sw_cpu(rc_qacks);
4077 def_access_sw_cpu(rc_delayed_comp);
4078
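/*
 * Generate accessors for simple software IB port counters
 * (ppd->ibport_data.rvp.n_<cntr>).  These are port-wide values, so any
 * per-VL query returns 0.
 */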
4079 #define def_access_ibp_counter(cntr) \
4080 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
4081                                 void *context, int vl, int mode, u64 data)    \
4082 {                                                                             \
4083         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4084                                                                               \
4085         if (vl != CNTR_INVALID_VL)                                            \
4086                 return 0;                                                     \
4087                                                                               \
4088         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
4089                              mode, data);                                     \
4090 }
4091
4092 def_access_ibp_counter(loop_pkts);
4093 def_access_ibp_counter(rc_resends);
4094 def_access_ibp_counter(rnr_naks);
4095 def_access_ibp_counter(other_naks);
4096 def_access_ibp_counter(rc_timeouts);
4097 def_access_ibp_counter(pkt_drops);
4098 def_access_ibp_counter(dmawait);
4099 def_access_ibp_counter(rc_seqnak);
4100 def_access_ibp_counter(rc_dupreq);
4101 def_access_ibp_counter(rdma_seq);
4102 def_access_ibp_counter(unaligned);
4103 def_access_ibp_counter(seq_naks);
4104
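/*
 * Device counter table, indexed by the C_* device counter enum.  Each entry
 * names the counter, identifies the CSR (0 for purely software or synthetic
 * counters), sets the CNTR_* flags, and supplies the read/write accessor.
 * The counter framework typically invokes the accessor roughly as
 *
 *	entry->rw_cntr(entry, dd, vl, CNTR_MODE_R, 0)
 *
 * for a read (a sketch of the calling convention, inferred from the accessor
 * signatures above rather than quoted from the framework code).
 */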
4105 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4106 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4107 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4108                         CNTR_NORMAL),
4109 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4110                         CNTR_NORMAL),
4111 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4112                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
4113                         CNTR_NORMAL),
4114 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4115                         CNTR_NORMAL),
4116 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4117                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4118 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4119                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4120 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4121                         CNTR_NORMAL),
4122 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4123                         CNTR_NORMAL),
4124 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4125                         CNTR_NORMAL),
4126 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4127                         CNTR_NORMAL),
4128 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4129                         CNTR_NORMAL),
4130 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4131                         CNTR_NORMAL),
4132 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4133                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4134 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4135                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4136 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4137                               CNTR_SYNTH),
4138 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4139                             access_dc_rcv_err_cnt),
4140 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4141                                  CNTR_SYNTH),
4142 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4143                                   CNTR_SYNTH),
4144 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4145                                   CNTR_SYNTH),
4146 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4147                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4148 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4149                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4150                                   CNTR_SYNTH),
4151 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4152                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4153 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4154                                CNTR_SYNTH),
4155 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4156                               CNTR_SYNTH),
4157 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4158                                CNTR_SYNTH),
4159 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4160                                  CNTR_SYNTH),
4161 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4162                                 CNTR_SYNTH),
4163 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4164                                 CNTR_SYNTH),
4165 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4166                                CNTR_SYNTH),
4167 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4168                                  CNTR_SYNTH | CNTR_VL),
4169 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4170                                 CNTR_SYNTH | CNTR_VL),
4171 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4172 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4173                                  CNTR_SYNTH | CNTR_VL),
4174 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4175 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4176                                  CNTR_SYNTH | CNTR_VL),
4177 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4178                               CNTR_SYNTH),
4179 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4180                                  CNTR_SYNTH | CNTR_VL),
4181 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4182                                 CNTR_SYNTH),
4183 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4184                                    CNTR_SYNTH | CNTR_VL),
4185 [C_DC_TOTAL_CRC] =
4186         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4187                          CNTR_SYNTH),
4188 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4189                                   CNTR_SYNTH),
4190 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4191                                   CNTR_SYNTH),
4192 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4193                                   CNTR_SYNTH),
4194 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4195                                   CNTR_SYNTH),
4196 [C_DC_CRC_MULT_LN] =
4197         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4198                          CNTR_SYNTH),
4199 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4200                                     CNTR_SYNTH),
4201 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4202                                     CNTR_SYNTH),
4203 [C_DC_SEQ_CRC_CNT] =
4204         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4205                          CNTR_SYNTH),
4206 [C_DC_ESC0_ONLY_CNT] =
4207         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4208                          CNTR_SYNTH),
4209 [C_DC_ESC0_PLUS1_CNT] =
4210         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4211                          CNTR_SYNTH),
4212 [C_DC_ESC0_PLUS2_CNT] =
4213         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4214                          CNTR_SYNTH),
4215 [C_DC_REINIT_FROM_PEER_CNT] =
4216         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4217                          CNTR_SYNTH),
4218 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4219                                   CNTR_SYNTH),
4220 [C_DC_MISC_FLG_CNT] =
4221         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4222                          CNTR_SYNTH),
4223 [C_DC_PRF_GOOD_LTP_CNT] =
4224         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4225 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4226         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4227                          CNTR_SYNTH),
4228 [C_DC_PRF_RX_FLIT_CNT] =
4229         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4230 [C_DC_PRF_TX_FLIT_CNT] =
4231         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4232 [C_DC_PRF_CLK_CNTR] =
4233         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4234 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4235         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4236 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4237         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4238                          CNTR_SYNTH),
4239 [C_DC_PG_STS_TX_SBE_CNT] =
4240         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4241 [C_DC_PG_STS_TX_MBE_CNT] =
4242         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4243                          CNTR_SYNTH),
4244 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4245                             access_sw_cpu_intr),
4246 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4247                             access_sw_cpu_rcv_limit),
4248 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4249                             access_sw_vtx_wait),
4250 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4251                             access_sw_pio_wait),
4252 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4253                             access_sw_pio_drain),
4254 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4255                             access_sw_kmem_wait),
4256 [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
4257                             hfi1_access_sw_tid_wait),
4258 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4259                             access_sw_send_schedule),
4260 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4261                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4262                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4263                                       dev_access_u32_csr),
4264 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4265                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4266                              access_sde_int_cnt),
4267 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4268                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4269                              access_sde_err_cnt),
4270 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4271                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4272                                   access_sde_idle_int_cnt),
4273 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4274                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4275                                       access_sde_progress_int_cnt),
4276 /* MISC_ERR_STATUS */
4277 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4278                                 CNTR_NORMAL,
4279                                 access_misc_pll_lock_fail_err_cnt),
4280 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4281                                 CNTR_NORMAL,
4282                                 access_misc_mbist_fail_err_cnt),
4283 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4284                                 CNTR_NORMAL,
4285                                 access_misc_invalid_eep_cmd_err_cnt),
4286 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4287                                 CNTR_NORMAL,
4288                                 access_misc_efuse_done_parity_err_cnt),
4289 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4290                                 CNTR_NORMAL,
4291                                 access_misc_efuse_write_err_cnt),
4292 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4293                                 0, CNTR_NORMAL,
4294                                 access_misc_efuse_read_bad_addr_err_cnt),
4295 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4296                                 CNTR_NORMAL,
4297                                 access_misc_efuse_csr_parity_err_cnt),
4298 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4299                                 CNTR_NORMAL,
4300                                 access_misc_fw_auth_failed_err_cnt),
4301 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4302                                 CNTR_NORMAL,
4303                                 access_misc_key_mismatch_err_cnt),
4304 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4305                                 CNTR_NORMAL,
4306                                 access_misc_sbus_write_failed_err_cnt),
4307 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4308                                 CNTR_NORMAL,
4309                                 access_misc_csr_write_bad_addr_err_cnt),
4310 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4311                                 CNTR_NORMAL,
4312                                 access_misc_csr_read_bad_addr_err_cnt),
4313 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4314                                 CNTR_NORMAL,
4315                                 access_misc_csr_parity_err_cnt),
4316 /* CceErrStatus */
4317 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4318                                 CNTR_NORMAL,
4319                                 access_sw_cce_err_status_aggregated_cnt),
4320 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4321                                 CNTR_NORMAL,
4322                                 access_cce_msix_csr_parity_err_cnt),
4323 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4324                                 CNTR_NORMAL,
4325                                 access_cce_int_map_unc_err_cnt),
4326 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4327                                 CNTR_NORMAL,
4328                                 access_cce_int_map_cor_err_cnt),
4329 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4330                                 CNTR_NORMAL,
4331                                 access_cce_msix_table_unc_err_cnt),
4332 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4333                                 CNTR_NORMAL,
4334                                 access_cce_msix_table_cor_err_cnt),
4335 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4336                                 0, CNTR_NORMAL,
4337                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4338 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4339                                 0, CNTR_NORMAL,
4340                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4341 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4342                                 CNTR_NORMAL,
4343                                 access_cce_seg_write_bad_addr_err_cnt),
4344 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4345                                 CNTR_NORMAL,
4346                                 access_cce_seg_read_bad_addr_err_cnt),
4347 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4348                                 CNTR_NORMAL,
4349                                 access_la_triggered_cnt),
4350 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4351                                 CNTR_NORMAL,
4352                                 access_cce_trgt_cpl_timeout_err_cnt),
4353 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4354                                 CNTR_NORMAL,
4355                                 access_pcic_receive_parity_err_cnt),
4356 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4357                                 CNTR_NORMAL,
4358                                 access_pcic_transmit_back_parity_err_cnt),
4359 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4360                                 0, CNTR_NORMAL,
4361                                 access_pcic_transmit_front_parity_err_cnt),
4362 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4363                                 CNTR_NORMAL,
4364                                 access_pcic_cpl_dat_q_unc_err_cnt),
4365 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4366                                 CNTR_NORMAL,
4367                                 access_pcic_cpl_hd_q_unc_err_cnt),
4368 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4369                                 CNTR_NORMAL,
4370                                 access_pcic_post_dat_q_unc_err_cnt),
4371 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4372                                 CNTR_NORMAL,
4373                                 access_pcic_post_hd_q_unc_err_cnt),
4374 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4375                                 CNTR_NORMAL,
4376                                 access_pcic_retry_sot_mem_unc_err_cnt),
4377 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4378                                 CNTR_NORMAL,
4379                                 access_pcic_retry_mem_unc_err),
4380 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4381                                 CNTR_NORMAL,
4382                                 access_pcic_n_post_dat_q_parity_err_cnt),
4383 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4384                                 CNTR_NORMAL,
4385                                 access_pcic_n_post_h_q_parity_err_cnt),
4386 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4387                                 CNTR_NORMAL,
4388                                 access_pcic_cpl_dat_q_cor_err_cnt),
4389 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4390                                 CNTR_NORMAL,
4391                                 access_pcic_cpl_hd_q_cor_err_cnt),
4392 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4393                                 CNTR_NORMAL,
4394                                 access_pcic_post_dat_q_cor_err_cnt),
4395 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4396                                 CNTR_NORMAL,
4397                                 access_pcic_post_hd_q_cor_err_cnt),
4398 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4399                                 CNTR_NORMAL,
4400                                 access_pcic_retry_sot_mem_cor_err_cnt),
4401 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4402                                 CNTR_NORMAL,
4403                                 access_pcic_retry_mem_cor_err_cnt),
4404 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4405                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4406                                 CNTR_NORMAL,
4407                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4408 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4409                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4410                                 CNTR_NORMAL,
4411                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4412                                 ),
4413 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4414                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4415                         CNTR_NORMAL,
4416                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4417 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4418                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4419                         CNTR_NORMAL,
4420                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4421 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4422                         0, CNTR_NORMAL,
4423                         access_cce_cli2_async_fifo_parity_err_cnt),
4424 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4425                         CNTR_NORMAL,
4426                         access_cce_csr_cfg_bus_parity_err_cnt),
4427 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4428                         0, CNTR_NORMAL,
4429                         access_cce_cli0_async_fifo_parity_err_cnt),
4430 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4431                         CNTR_NORMAL,
4432                         access_cce_rspd_data_parity_err_cnt),
4433 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4434                         CNTR_NORMAL,
4435                         access_cce_trgt_access_err_cnt),
4436 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4437                         0, CNTR_NORMAL,
4438                         access_cce_trgt_async_fifo_parity_err_cnt),
4439 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4440                         CNTR_NORMAL,
4441                         access_cce_csr_write_bad_addr_err_cnt),
4442 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4443                         CNTR_NORMAL,
4444                         access_cce_csr_read_bad_addr_err_cnt),
4445 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4446                         CNTR_NORMAL,
4447                         access_ccs_csr_parity_err_cnt),
4448
4449 /* RcvErrStatus */
4450 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4451                         CNTR_NORMAL,
4452                         access_rx_csr_parity_err_cnt),
4453 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4454                         CNTR_NORMAL,
4455                         access_rx_csr_write_bad_addr_err_cnt),
4456 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4457                         CNTR_NORMAL,
4458                         access_rx_csr_read_bad_addr_err_cnt),
4459 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4460                         CNTR_NORMAL,
4461                         access_rx_dma_csr_unc_err_cnt),
4462 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4463                         CNTR_NORMAL,
4464                         access_rx_dma_dq_fsm_encoding_err_cnt),
4465 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4466                         CNTR_NORMAL,
4467                         access_rx_dma_eq_fsm_encoding_err_cnt),
4468 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4469                         CNTR_NORMAL,
4470                         access_rx_dma_csr_parity_err_cnt),
4471 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4472                         CNTR_NORMAL,
4473                         access_rx_rbuf_data_cor_err_cnt),
4474 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4475                         CNTR_NORMAL,
4476                         access_rx_rbuf_data_unc_err_cnt),
4477 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4478                         CNTR_NORMAL,
4479                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4480 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4481                         CNTR_NORMAL,
4482                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4483 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4484                         CNTR_NORMAL,
4485                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4486 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4487                         CNTR_NORMAL,
4488                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4489 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4490                         CNTR_NORMAL,
4491                         access_rx_rbuf_desc_part2_cor_err_cnt),
4492 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4493                         CNTR_NORMAL,
4494                         access_rx_rbuf_desc_part2_unc_err_cnt),
4495 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4496                         CNTR_NORMAL,
4497                         access_rx_rbuf_desc_part1_cor_err_cnt),
4498 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4499                         CNTR_NORMAL,
4500                         access_rx_rbuf_desc_part1_unc_err_cnt),
4501 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4502                         CNTR_NORMAL,
4503                         access_rx_hq_intr_fsm_err_cnt),
4504 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4505                         CNTR_NORMAL,
4506                         access_rx_hq_intr_csr_parity_err_cnt),
4507 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4508                         CNTR_NORMAL,
4509                         access_rx_lookup_csr_parity_err_cnt),
4510 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4511                         CNTR_NORMAL,
4512                         access_rx_lookup_rcv_array_cor_err_cnt),
4513 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4514                         CNTR_NORMAL,
4515                         access_rx_lookup_rcv_array_unc_err_cnt),
4516 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4517                         0, CNTR_NORMAL,
4518                         access_rx_lookup_des_part2_parity_err_cnt),
4519 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4520                         0, CNTR_NORMAL,
4521                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4522 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4523                         CNTR_NORMAL,
4524                         access_rx_lookup_des_part1_unc_err_cnt),
4525 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4526                         CNTR_NORMAL,
4527                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4528 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4529                         CNTR_NORMAL,
4530                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4531 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4532                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4533                         CNTR_NORMAL,
4534                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4535 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4536                         0, CNTR_NORMAL,
4537                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4538 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4539                         0, CNTR_NORMAL,
4540                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4541 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4542                         CNTR_NORMAL,
4543                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4544 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4545                         CNTR_NORMAL,
4546                         access_rx_rbuf_empty_err_cnt),
4547 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4548                         CNTR_NORMAL,
4549                         access_rx_rbuf_full_err_cnt),
4550 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4551                         CNTR_NORMAL,
4552                         access_rbuf_bad_lookup_err_cnt),
4553 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4554                         CNTR_NORMAL,
4555                         access_rbuf_ctx_id_parity_err_cnt),
4556 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4557                         CNTR_NORMAL,
4558                         access_rbuf_csr_qeopdw_parity_err_cnt),
4559 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4560                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4561                         CNTR_NORMAL,
4562                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4563 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4564                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4565                         CNTR_NORMAL,
4566                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4567 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4568                         0, CNTR_NORMAL,
4569                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4570 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4571                         0, CNTR_NORMAL,
4572                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4573 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4574                         0, 0, CNTR_NORMAL,
4575                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4576 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4577                         0, CNTR_NORMAL,
4578                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4579 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4580                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4581                         CNTR_NORMAL,
4582                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4583 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4584                         0, CNTR_NORMAL,
4585                         access_rx_rbuf_block_list_read_cor_err_cnt),
4586 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4587                         0, CNTR_NORMAL,
4588                         access_rx_rbuf_block_list_read_unc_err_cnt),
4589 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4590                         CNTR_NORMAL,
4591                         access_rx_rbuf_lookup_des_cor_err_cnt),
4592 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4593                         CNTR_NORMAL,
4594                         access_rx_rbuf_lookup_des_unc_err_cnt),
4595 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4596                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4597                         CNTR_NORMAL,
4598                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4599 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4600                         CNTR_NORMAL,
4601                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4602 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4603                         CNTR_NORMAL,
4604                         access_rx_rbuf_free_list_cor_err_cnt),
4605 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4606                         CNTR_NORMAL,
4607                         access_rx_rbuf_free_list_unc_err_cnt),
4608 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4609                         CNTR_NORMAL,
4610                         access_rx_rcv_fsm_encoding_err_cnt),
4611 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4612                         CNTR_NORMAL,
4613                         access_rx_dma_flag_cor_err_cnt),
4614 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4615                         CNTR_NORMAL,
4616                         access_rx_dma_flag_unc_err_cnt),
4617 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4618                         CNTR_NORMAL,
4619                         access_rx_dc_sop_eop_parity_err_cnt),
4620 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4621                         CNTR_NORMAL,
4622                         access_rx_rcv_csr_parity_err_cnt),
4623 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4624                         CNTR_NORMAL,
4625                         access_rx_rcv_qp_map_table_cor_err_cnt),
4626 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4627                         CNTR_NORMAL,
4628                         access_rx_rcv_qp_map_table_unc_err_cnt),
4629 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4630                         CNTR_NORMAL,
4631                         access_rx_rcv_data_cor_err_cnt),
4632 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4633                         CNTR_NORMAL,
4634                         access_rx_rcv_data_unc_err_cnt),
4635 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4636                         CNTR_NORMAL,
4637                         access_rx_rcv_hdr_cor_err_cnt),
4638 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4639                         CNTR_NORMAL,
4640                         access_rx_rcv_hdr_unc_err_cnt),
4641 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4642                         CNTR_NORMAL,
4643                         access_rx_dc_intf_parity_err_cnt),
4644 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4645                         CNTR_NORMAL,
4646                         access_rx_dma_csr_cor_err_cnt),
4647 /* SendPioErrStatus */
4648 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4649                         CNTR_NORMAL,
4650                         access_pio_pec_sop_head_parity_err_cnt),
4651 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4652                         CNTR_NORMAL,
4653                         access_pio_pcc_sop_head_parity_err_cnt),
4654 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4655                         0, 0, CNTR_NORMAL,
4656                         access_pio_last_returned_cnt_parity_err_cnt),
4657 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4658                         0, CNTR_NORMAL,
4659                         access_pio_current_free_cnt_parity_err_cnt),
4660 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4661                         CNTR_NORMAL,
4662                         access_pio_reserved_31_err_cnt),
4663 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4664                         CNTR_NORMAL,
4665                         access_pio_reserved_30_err_cnt),
4666 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4667                         CNTR_NORMAL,
4668                         access_pio_ppmc_sop_len_err_cnt),
4669 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4670                         CNTR_NORMAL,
4671                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4672 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4673                         CNTR_NORMAL,
4674                         access_pio_vl_fifo_parity_err_cnt),
4675 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4676                         CNTR_NORMAL,
4677                         access_pio_vlf_sop_parity_err_cnt),
4678 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4679                         CNTR_NORMAL,
4680                         access_pio_vlf_v1_len_parity_err_cnt),
4681 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4682                         CNTR_NORMAL,
4683                         access_pio_block_qw_count_parity_err_cnt),
4684 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4685                         CNTR_NORMAL,
4686                         access_pio_write_qw_valid_parity_err_cnt),
4687 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4688                         CNTR_NORMAL,
4689                         access_pio_state_machine_err_cnt),
4690 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4691                         CNTR_NORMAL,
4692                         access_pio_write_data_parity_err_cnt),
4693 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4694                         CNTR_NORMAL,
4695                         access_pio_host_addr_mem_cor_err_cnt),
4696 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4697                         CNTR_NORMAL,
4698                         access_pio_host_addr_mem_unc_err_cnt),
4699 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4700                         CNTR_NORMAL,
4701                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4702 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4703                         CNTR_NORMAL,
4704                         access_pio_init_sm_in_err_cnt),
4705 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4706                         CNTR_NORMAL,
4707                         access_pio_ppmc_pbl_fifo_err_cnt),
4708 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4709                         0, CNTR_NORMAL,
4710                         access_pio_credit_ret_fifo_parity_err_cnt),
4711 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4712                         CNTR_NORMAL,
4713                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4714 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4715                         CNTR_NORMAL,
4716                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4717 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4718                         CNTR_NORMAL,
4719                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4720 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4721                         CNTR_NORMAL,
4722                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4723 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4724                         CNTR_NORMAL,
4725                         access_pio_sm_pkt_reset_parity_err_cnt),
4726 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4727                         CNTR_NORMAL,
4728                         access_pio_pkt_evict_fifo_parity_err_cnt),
4729 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4730                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4731                         CNTR_NORMAL,
4732                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4733 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4734                         CNTR_NORMAL,
4735                         access_pio_sbrdctl_crrel_parity_err_cnt),
4736 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4737                         CNTR_NORMAL,
4738                         access_pio_pec_fifo_parity_err_cnt),
4739 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4740                         CNTR_NORMAL,
4741                         access_pio_pcc_fifo_parity_err_cnt),
4742 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4743                         CNTR_NORMAL,
4744                         access_pio_sb_mem_fifo1_err_cnt),
4745 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4746                         CNTR_NORMAL,
4747                         access_pio_sb_mem_fifo0_err_cnt),
4748 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4749                         CNTR_NORMAL,
4750                         access_pio_csr_parity_err_cnt),
4751 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4752                         CNTR_NORMAL,
4753                         access_pio_write_addr_parity_err_cnt),
4754 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4755                         CNTR_NORMAL,
4756                         access_pio_write_bad_ctxt_err_cnt),
4757 /* SendDmaErrStatus */
4758 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4759                         0, CNTR_NORMAL,
4760                         access_sdma_pcie_req_tracking_cor_err_cnt),
4761 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4762                         0, CNTR_NORMAL,
4763                         access_sdma_pcie_req_tracking_unc_err_cnt),
4764 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4765                         CNTR_NORMAL,
4766                         access_sdma_csr_parity_err_cnt),
4767 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4768                         CNTR_NORMAL,
4769                         access_sdma_rpy_tag_err_cnt),
4770 /* SendEgressErrStatus */
4771 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4772                         CNTR_NORMAL,
4773                         access_tx_read_pio_memory_csr_unc_err_cnt),
4774 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4775                         0, CNTR_NORMAL,
4776                         access_tx_read_sdma_memory_csr_err_cnt),
4777 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4778                         CNTR_NORMAL,
4779                         access_tx_egress_fifo_cor_err_cnt),
4780 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4781                         CNTR_NORMAL,
4782                         access_tx_read_pio_memory_cor_err_cnt),
4783 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4784                         CNTR_NORMAL,
4785                         access_tx_read_sdma_memory_cor_err_cnt),
4786 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4787                         CNTR_NORMAL,
4788                         access_tx_sb_hdr_cor_err_cnt),
4789 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4790                         CNTR_NORMAL,
4791                         access_tx_credit_overrun_err_cnt),
4792 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4793                         CNTR_NORMAL,
4794                         access_tx_launch_fifo8_cor_err_cnt),
4795 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4796                         CNTR_NORMAL,
4797                         access_tx_launch_fifo7_cor_err_cnt),
4798 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4799                         CNTR_NORMAL,
4800                         access_tx_launch_fifo6_cor_err_cnt),
4801 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4802                         CNTR_NORMAL,
4803                         access_tx_launch_fifo5_cor_err_cnt),
4804 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4805                         CNTR_NORMAL,
4806                         access_tx_launch_fifo4_cor_err_cnt),
4807 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4808                         CNTR_NORMAL,
4809                         access_tx_launch_fifo3_cor_err_cnt),
4810 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4811                         CNTR_NORMAL,
4812                         access_tx_launch_fifo2_cor_err_cnt),
4813 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4814                         CNTR_NORMAL,
4815                         access_tx_launch_fifo1_cor_err_cnt),
4816 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4817                         CNTR_NORMAL,
4818                         access_tx_launch_fifo0_cor_err_cnt),
4819 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4820                         CNTR_NORMAL,
4821                         access_tx_credit_return_vl_err_cnt),
4822 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4823                         CNTR_NORMAL,
4824                         access_tx_hcrc_insertion_err_cnt),
4825 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4826                         CNTR_NORMAL,
4827                         access_tx_egress_fifo_unc_err_cnt),
4828 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4829                         CNTR_NORMAL,
4830                         access_tx_read_pio_memory_unc_err_cnt),
4831 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4832                         CNTR_NORMAL,
4833                         access_tx_read_sdma_memory_unc_err_cnt),
4834 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4835                         CNTR_NORMAL,
4836                         access_tx_sb_hdr_unc_err_cnt),
4837 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4838                         CNTR_NORMAL,
4839                         access_tx_credit_return_partiy_err_cnt),
4840 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4841                         0, 0, CNTR_NORMAL,
4842                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4843 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4844                         0, 0, CNTR_NORMAL,
4845                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4846 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4847                         0, 0, CNTR_NORMAL,
4848                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4849 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4850                         0, 0, CNTR_NORMAL,
4851                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4852 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4853                         0, 0, CNTR_NORMAL,
4854                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4855 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4856                         0, 0, CNTR_NORMAL,
4857                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4858 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4859                         0, 0, CNTR_NORMAL,
4860                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4861 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4862                         0, 0, CNTR_NORMAL,
4863                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4864 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4865                         0, 0, CNTR_NORMAL,
4866                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4867 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4868                         0, 0, CNTR_NORMAL,
4869                         access_tx_sdma15_disallowed_packet_err_cnt),
4870 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4871                         0, 0, CNTR_NORMAL,
4872                         access_tx_sdma14_disallowed_packet_err_cnt),
4873 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4874                         0, 0, CNTR_NORMAL,
4875                         access_tx_sdma13_disallowed_packet_err_cnt),
4876 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4877                         0, 0, CNTR_NORMAL,
4878                         access_tx_sdma12_disallowed_packet_err_cnt),
4879 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4880                         0, 0, CNTR_NORMAL,
4881                         access_tx_sdma11_disallowed_packet_err_cnt),
4882 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4883                         0, 0, CNTR_NORMAL,
4884                         access_tx_sdma10_disallowed_packet_err_cnt),
4885 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4886                         0, 0, CNTR_NORMAL,
4887                         access_tx_sdma9_disallowed_packet_err_cnt),
4888 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4889                         0, 0, CNTR_NORMAL,
4890                         access_tx_sdma8_disallowed_packet_err_cnt),
4891 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4892                         0, 0, CNTR_NORMAL,
4893                         access_tx_sdma7_disallowed_packet_err_cnt),
4894 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4895                         0, 0, CNTR_NORMAL,
4896                         access_tx_sdma6_disallowed_packet_err_cnt),
4897 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4898                         0, 0, CNTR_NORMAL,
4899                         access_tx_sdma5_disallowed_packet_err_cnt),
4900 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4901                         0, 0, CNTR_NORMAL,
4902                         access_tx_sdma4_disallowed_packet_err_cnt),
4903 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4904                         0, 0, CNTR_NORMAL,
4905                         access_tx_sdma3_disallowed_packet_err_cnt),
4906 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4907                         0, 0, CNTR_NORMAL,
4908                         access_tx_sdma2_disallowed_packet_err_cnt),
4909 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4910                         0, 0, CNTR_NORMAL,
4911                         access_tx_sdma1_disallowed_packet_err_cnt),
4912 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4913                         0, 0, CNTR_NORMAL,
4914                         access_tx_sdma0_disallowed_packet_err_cnt),
4915 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4916                         CNTR_NORMAL,
4917                         access_tx_config_parity_err_cnt),
4918 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4919                         CNTR_NORMAL,
4920                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4921 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4922                         CNTR_NORMAL,
4923                         access_tx_launch_csr_parity_err_cnt),
4924 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4925                         CNTR_NORMAL,
4926                         access_tx_illegal_vl_err_cnt),
4927 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4928                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4929                         CNTR_NORMAL,
4930                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4931 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4932                         CNTR_NORMAL,
4933                         access_egress_reserved_10_err_cnt),
4934 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4935                         CNTR_NORMAL,
4936                         access_egress_reserved_9_err_cnt),
4937 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4938                         0, 0, CNTR_NORMAL,
4939                         access_tx_sdma_launch_intf_parity_err_cnt),
4940 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4941                         CNTR_NORMAL,
4942                         access_tx_pio_launch_intf_parity_err_cnt),
4943 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4944                         CNTR_NORMAL,
4945                         access_egress_reserved_6_err_cnt),
4946 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4947                         CNTR_NORMAL,
4948                         access_tx_incorrect_link_state_err_cnt),
4949 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4950                         CNTR_NORMAL,
4951                         access_tx_linkdown_err_cnt),
4952 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4953                         "EgressFifoUnderrunOrParityErr", 0, 0,
4954                         CNTR_NORMAL,
4955                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4956 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4957                         CNTR_NORMAL,
4958                         access_egress_reserved_2_err_cnt),
4959 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4960                         CNTR_NORMAL,
4961                         access_tx_pkt_integrity_mem_unc_err_cnt),
4962 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4963                         CNTR_NORMAL,
4964                         access_tx_pkt_integrity_mem_cor_err_cnt),
4965 /* SendErrStatus */
4966 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4967                         CNTR_NORMAL,
4968                         access_send_csr_write_bad_addr_err_cnt),
4969 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4970                         CNTR_NORMAL,
4971                         access_send_csr_read_bad_addr_err_cnt),
4972 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4973                         CNTR_NORMAL,
4974                         access_send_csr_parity_cnt),
4975 /* SendCtxtErrStatus */
4976 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4977                         CNTR_NORMAL,
4978                         access_pio_write_out_of_bounds_err_cnt),
4979 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4980                         CNTR_NORMAL,
4981                         access_pio_write_overflow_err_cnt),
4982 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4983                         0, 0, CNTR_NORMAL,
4984                         access_pio_write_crosses_boundary_err_cnt),
4985 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4986                         CNTR_NORMAL,
4987                         access_pio_disallowed_packet_err_cnt),
4988 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4989                         CNTR_NORMAL,
4990                         access_pio_inconsistent_sop_err_cnt),
4991 /* SendDmaEngErrStatus */
4992 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4993                         0, 0, CNTR_NORMAL,
4994                         access_sdma_header_request_fifo_cor_err_cnt),
4995 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4996                         CNTR_NORMAL,
4997                         access_sdma_header_storage_cor_err_cnt),
4998 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4999                         CNTR_NORMAL,
5000                         access_sdma_packet_tracking_cor_err_cnt),
5001 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5002                         CNTR_NORMAL,
5003                         access_sdma_assembly_cor_err_cnt),
5004 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5005                         CNTR_NORMAL,
5006                         access_sdma_desc_table_cor_err_cnt),
5007 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5008                         0, 0, CNTR_NORMAL,
5009                         access_sdma_header_request_fifo_unc_err_cnt),
5010 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5011                         CNTR_NORMAL,
5012                         access_sdma_header_storage_unc_err_cnt),
5013 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5014                         CNTR_NORMAL,
5015                         access_sdma_packet_tracking_unc_err_cnt),
5016 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5017                         CNTR_NORMAL,
5018                         access_sdma_assembly_unc_err_cnt),
5019 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5020                         CNTR_NORMAL,
5021                         access_sdma_desc_table_unc_err_cnt),
5022 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5023                         CNTR_NORMAL,
5024                         access_sdma_timeout_err_cnt),
5025 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5026                         CNTR_NORMAL,
5027                         access_sdma_header_length_err_cnt),
5028 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5029                         CNTR_NORMAL,
5030                         access_sdma_header_address_err_cnt),
5031 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5032                         CNTR_NORMAL,
5033                         access_sdma_header_select_err_cnt),
5034 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5035                         CNTR_NORMAL,
5036                         access_sdma_reserved_9_err_cnt),
5037 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5038                         CNTR_NORMAL,
5039                         access_sdma_packet_desc_overflow_err_cnt),
5040 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5041                         CNTR_NORMAL,
5042                         access_sdma_length_mismatch_err_cnt),
5043 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5044                         CNTR_NORMAL,
5045                         access_sdma_halt_err_cnt),
5046 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5047                         CNTR_NORMAL,
5048                         access_sdma_mem_read_err_cnt),
5049 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5050                         CNTR_NORMAL,
5051                         access_sdma_first_desc_err_cnt),
5052 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5053                         CNTR_NORMAL,
5054                         access_sdma_tail_out_of_bounds_err_cnt),
5055 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5056                         CNTR_NORMAL,
5057                         access_sdma_too_long_err_cnt),
5058 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5059                         CNTR_NORMAL,
5060                         access_sdma_gen_mismatch_err_cnt),
5061 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5062                         CNTR_NORMAL,
5063                         access_sdma_wrong_dw_err_cnt),
5064 };
5065
5066 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5067 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5068                         CNTR_NORMAL),
5069 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5070                         CNTR_NORMAL),
5071 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5072                         CNTR_NORMAL),
5073 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5074                         CNTR_NORMAL),
5075 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5076                         CNTR_NORMAL),
5077 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5078                         CNTR_NORMAL),
5079 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5080                         CNTR_NORMAL),
5081 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5082 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5083 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5084 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5085                                       CNTR_SYNTH | CNTR_VL),
5086 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5087                                      CNTR_SYNTH | CNTR_VL),
5088 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5089                                       CNTR_SYNTH | CNTR_VL),
5090 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5091 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5092 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5093                              access_sw_link_dn_cnt),
5094 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5095                            access_sw_link_up_cnt),
5096 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5097                                  access_sw_unknown_frame_cnt),
5098 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5099                              access_sw_xmit_discards),
5100 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5101                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5102                                 access_sw_xmit_discards),
5103 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5104                                  access_xmit_constraint_errs),
5105 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5106                                 access_rcv_constraint_errs),
5107 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5108 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5109 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5110 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5111 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5112 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5113 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5114 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5115 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5116 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5117 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5118 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5119 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5120                                access_sw_cpu_rc_acks),
5121 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5122                                 access_sw_cpu_rc_qacks),
5123 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5124                                        access_sw_cpu_rc_delayed_comp),
5125 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5126 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5127 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5128 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5129 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5130 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5131 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5132 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5133 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5134 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5135 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5136 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5137 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5138 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5139 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5140 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5141 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5142 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5143 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5144 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5145 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5146 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5147 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5148 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5149 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5150 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5151 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5152 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5153 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5154 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5155 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5156 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5157 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5158 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5159 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5160 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5161 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5162 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5163 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5164 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5165 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5166 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5167 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5168 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5169 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5170 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5171 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5172 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5173 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5174 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5175 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5176 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5177 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5178 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5179 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5180 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5181 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5182 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5183 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5184 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5185 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5186 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5187 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5188 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5189 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5190 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5191 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5192 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5193 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5194 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5195 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5196 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5197 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5198 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5199 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5200 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5201 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5202 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5203 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5204 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5205 };
5206
5207 /* ======================================================================== */
5208
5209 /* return true if this is chip revision A */
5210 int is_ax(struct hfi1_devdata *dd)
5211 {
5212         u8 chip_rev_minor =
5213                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5214                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5215         return (chip_rev_minor & 0xf0) == 0;
5216 }
5217
5218 /* return true if this is chip revision B */
5219 int is_bx(struct hfi1_devdata *dd)
5220 {
5221         u8 chip_rev_minor =
5222                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5223                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5224         return (chip_rev_minor & 0xF0) == 0x10;
5225 }
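
/*
 * Illustrative note (not from the hardware spec): both helpers above pull
 * the minor chip revision out of CCE_REVISION and test only its upper
 * nibble, treating it as the stepping letter (0x0_ = A, 0x1_ = B).  A
 * minimal sketch, assuming a hypothetical minor revision of 0x11 (a "B1"
 * part):
 *
 *      chip_rev_minor = 0x11;
 *      (chip_rev_minor & 0xf0) == 0x00;        is_ax() -> false
 *      (chip_rev_minor & 0xf0) == 0x10;        is_bx() -> true
 */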
5226
5227 /* return true if the receive urgent interrupt is masked for rcd */
5228 bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5229 {
5230         u64 mask;
5231         u32 is = IS_RCVURGENT_START + rcd->ctxt;
5232         u8 bit = is % 64;
5233
5234         mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5235         return !(mask & BIT_ULL(bit));
5236 }
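
/*
 * Illustrative only: the interrupt source number picks both which 64-bit
 * CCE_INT_MASK CSR to read (one register per 64 sources, 8 bytes apart)
 * and which bit within it.  For a hypothetical context 5, and assuming
 * IS_RCVURGENT_START were 192 (example value, not the real constant):
 *
 *      is  = 192 + 5 = 197
 *      csr = CCE_INT_MASK + 8 * (197 / 64) = CCE_INT_MASK + 24
 *      bit = 197 % 64 = 5
 *
 * The urgent interrupt is reported as masked when that bit is clear.
 */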
5237
5238 /*
5239  * Append string s to buffer buf.  Arguments curp and len are the current
5240  * position and remaining length, respectively.
5241  *
5242  * return 0 on success, 1 on out of room
5243  */
5244 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5245 {
5246         char *p = *curp;
5247         int len = *lenp;
5248         int result = 0; /* success */
5249         char c;
5250
5251         /* add a comma, if this is not the first item in the buffer */
5252         if (p != buf) {
5253                 if (len == 0) {
5254                         result = 1; /* out of room */
5255                         goto done;
5256                 }
5257                 *p++ = ',';
5258                 len--;
5259         }
5260
5261         /* copy the string */
5262         while ((c = *s++) != 0) {
5263                 if (len == 0) {
5264                         result = 1; /* out of room */
5265                         goto done;
5266                 }
5267                 *p++ = c;
5268                 len--;
5269         }
5270
5271 done:
5272         /* write return values */
5273         *curp = p;
5274         *lenp = len;
5275
5276         return result;
5277 }
5278
5279 /*
5280  * Using the given flag table, print a comma separated string into
5281  * the buffer.  End in '*' if the buffer is too short.
5282  */
5283 static char *flag_string(char *buf, int buf_len, u64 flags,
5284                          struct flag_table *table, int table_size)
5285 {
5286         char extra[32];
5287         char *p = buf;
5288         int len = buf_len;
5289         int no_room = 0;
5290         int i;
5291
5292         /* make sure there are at least 2 bytes so we can form "*" */
5293         if (len < 2)
5294                 return "";
5295
5296         len--;  /* leave room for a nul */
5297         for (i = 0; i < table_size; i++) {
5298                 if (flags & table[i].flag) {
5299                         no_room = append_str(buf, &p, &len, table[i].str);
5300                         if (no_room)
5301                                 break;
5302                         flags &= ~table[i].flag;
5303                 }
5304         }
5305
5306         /* any undocumented bits left? */
5307         if (!no_room && flags) {
5308                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5309                 no_room = append_str(buf, &p, &len, extra);
5310         }
5311
5312         /* add '*' if we ran out of room */
5313         if (no_room) {
5314                 /* may need to back up to add space for a '*' */
5315                 if (len == 0)
5316                         --p;
5317                 *p++ = '*';
5318         }
5319
5320         /* add final nul - space already allocated above */
5321         *p = 0;
5322         return buf;
5323 }
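
/*
 * Minimal usage sketch (illustrative; it mirrors the *_err_status_string()
 * wrappers below): callers hand in a stack buffer and a flag table and get
 * back a comma separated list of the set bits.  Bits the table does not
 * describe are reported as "bits 0x...", and a trailing '*' marks a
 * truncated result:
 *
 *      char buf[96];
 *
 *      dd_dev_info(dd, "CCE Error: %s\n",
 *                  flag_string(buf, sizeof(buf), reg, cce_err_status_flags,
 *                              ARRAY_SIZE(cce_err_status_flags)));
 */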
5324
5325 /* first 8 CCE error interrupt source names */
5326 static const char * const cce_misc_names[] = {
5327         "CceErrInt",            /* 0 */
5328         "RxeErrInt",            /* 1 */
5329         "MiscErrInt",           /* 2 */
5330         "Reserved3",            /* 3 */
5331         "PioErrInt",            /* 4 */
5332         "SDmaErrInt",           /* 5 */
5333         "EgressErrInt",         /* 6 */
5334         "TxeErrInt"             /* 7 */
5335 };
5336
5337 /*
5338  * Return the miscellaneous error interrupt name.
5339  */
5340 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5341 {
5342         if (source < ARRAY_SIZE(cce_misc_names))
5343                 strncpy(buf, cce_misc_names[source], bsize);
5344         else
5345                 snprintf(buf, bsize, "Reserved%u",
5346                          source + IS_GENERAL_ERR_START);
5347
5348         return buf;
5349 }
5350
5351 /*
5352  * Return the SDMA engine error interrupt name.
5353  */
5354 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5355 {
5356         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5357         return buf;
5358 }
5359
5360 /*
5361  * Return the send context error interrupt name.
5362  */
5363 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5364 {
5365         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5366         return buf;
5367 }
5368
5369 static const char * const various_names[] = {
5370         "PbcInt",
5371         "GpioAssertInt",
5372         "Qsfp1Int",
5373         "Qsfp2Int",
5374         "TCritInt"
5375 };
5376
5377 /*
5378  * Return the various interrupt name.
5379  */
5380 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5381 {
5382         if (source < ARRAY_SIZE(various_names))
5383                 strncpy(buf, various_names[source], bsize);
5384         else
5385                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5386         return buf;
5387 }
5388
5389 /*
5390  * Return the DC interrupt name.
5391  */
5392 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5393 {
5394         static const char * const dc_int_names[] = {
5395                 "common",
5396                 "lcb",
5397                 "8051",
5398                 "lbm"   /* local block merge */
5399         };
5400
5401         if (source < ARRAY_SIZE(dc_int_names))
5402                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5403         else
5404                 snprintf(buf, bsize, "DCInt%u", source);
5405         return buf;
5406 }
5407
5408 static const char * const sdma_int_names[] = {
5409         "SDmaInt",
5410         "SdmaIdleInt",
5411         "SdmaProgressInt",
5412 };
5413
5414 /*
5415  * Return the SDMA engine interrupt name.
5416  */
5417 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5418 {
5419         /* what interrupt */
5420         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5421         /* which engine */
5422         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5423
5424         if (likely(what < 3))
5425                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5426         else
5427                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5428         return buf;
5429 }
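
/*
 * Illustrative decode of the SDMA interrupt source numbering used above:
 * sources are grouped by interrupt type first, then by engine, i.e.
 * source == what * TXE_NUM_SDMA_ENGINES + which.  Assuming a hypothetical
 * TXE_NUM_SDMA_ENGINES of 16, source 17 decodes as what = 1, which = 1,
 * producing the name "SdmaIdleInt1".
 */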
5430
5431 /*
5432  * Return the receive available interrupt name.
5433  */
5434 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5435 {
5436         snprintf(buf, bsize, "RcvAvailInt%u", source);
5437         return buf;
5438 }
5439
5440 /*
5441  * Return the receive urgent interrupt name.
5442  */
5443 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5444 {
5445         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5446         return buf;
5447 }
5448
5449 /*
5450  * Return the send credit interrupt name.
5451  */
5452 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5453 {
5454         snprintf(buf, bsize, "SendCreditInt%u", source);
5455         return buf;
5456 }
5457
5458 /*
5459  * Return the reserved interrupt name.
5460  */
5461 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5462 {
5463         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5464         return buf;
5465 }
5466
5467 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5468 {
5469         return flag_string(buf, buf_len, flags,
5470                            cce_err_status_flags,
5471                            ARRAY_SIZE(cce_err_status_flags));
5472 }
5473
5474 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5475 {
5476         return flag_string(buf, buf_len, flags,
5477                            rxe_err_status_flags,
5478                            ARRAY_SIZE(rxe_err_status_flags));
5479 }
5480
5481 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5482 {
5483         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5484                            ARRAY_SIZE(misc_err_status_flags));
5485 }
5486
5487 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5488 {
5489         return flag_string(buf, buf_len, flags,
5490                            pio_err_status_flags,
5491                            ARRAY_SIZE(pio_err_status_flags));
5492 }
5493
5494 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5495 {
5496         return flag_string(buf, buf_len, flags,
5497                            sdma_err_status_flags,
5498                            ARRAY_SIZE(sdma_err_status_flags));
5499 }
5500
5501 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5502 {
5503         return flag_string(buf, buf_len, flags,
5504                            egress_err_status_flags,
5505                            ARRAY_SIZE(egress_err_status_flags));
5506 }
5507
5508 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5509 {
5510         return flag_string(buf, buf_len, flags,
5511                            egress_err_info_flags,
5512                            ARRAY_SIZE(egress_err_info_flags));
5513 }
5514
5515 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5516 {
5517         return flag_string(buf, buf_len, flags,
5518                            send_err_status_flags,
5519                            ARRAY_SIZE(send_err_status_flags));
5520 }
5521
5522 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5523 {
5524         char buf[96];
5525         int i = 0;
5526
5527         /*
5528          * For most of these errors, there is nothing that can be done
5529          * except report or record them.
5530          */
5531         dd_dev_info(dd, "CCE Error: %s\n",
5532                     cce_err_status_string(buf, sizeof(buf), reg));
5533
5534         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5535             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5536                 /* this error requires a manual drop into SPC freeze mode */
5537                 /* then a fix up */
5538                 start_freeze_handling(dd->pport, FREEZE_SELF);
5539         }
5540
5541         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5542                 if (reg & (1ull << i)) {
5543                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5544                         /* maintain a counter over all cce_err_status errors */
5545                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5546                 }
5547         }
5548 }
5549
5550 /*
5551  * Check counters for receive errors that do not have an interrupt
5552  * associated with them.
5553  */
5554 #define RCVERR_CHECK_TIME 10
5555 static void update_rcverr_timer(struct timer_list *t)
5556 {
5557         struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5558         struct hfi1_pportdata *ppd = dd->pport;
5559         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5560
5561         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5562             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5563                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5564                 set_link_down_reason(
5565                 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5566                 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5567                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5568         }
5569         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5570
5571         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5572 }
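
/*
 * Descriptive note: the timer above re-arms itself every RCVERR_CHECK_TIME
 * (10) seconds and only requests a link bounce when the RcvOverflow counter
 * has advanced since the previous check and the port's PortErrorAction has
 * the excessive-buffer-overrun bit set.
 */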
5573
5574 static int init_rcverr(struct hfi1_devdata *dd)
5575 {
5576         timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5577         /* Assume the hardware counter has been reset */
5578         dd->rcv_ovfl_cnt = 0;
5579         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5580 }
5581
5582 static void free_rcverr(struct hfi1_devdata *dd)
5583 {
5584         if (dd->rcverr_timer.function)
5585                 del_timer_sync(&dd->rcverr_timer);
5586 }
5587
5588 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5589 {
5590         char buf[96];
5591         int i = 0;
5592
5593         dd_dev_info(dd, "Receive Error: %s\n",
5594                     rxe_err_status_string(buf, sizeof(buf), reg));
5595
5596         if (reg & ALL_RXE_FREEZE_ERR) {
5597                 int flags = 0;
5598
5599                 /*
5600                  * Freeze mode recovery is disabled for the errors
5601                  * in RXE_FREEZE_ABORT_MASK
5602                  */
5603                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5604                         flags = FREEZE_ABORT;
5605
5606                 start_freeze_handling(dd->pport, flags);
5607         }
5608
5609         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5610                 if (reg & (1ull << i))
5611                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5612         }
5613 }
5614
5615 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5616 {
5617         char buf[96];
5618         int i = 0;
5619
5620         dd_dev_info(dd, "Misc Error: %s",
5621                     misc_err_status_string(buf, sizeof(buf), reg));
5622         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5623                 if (reg & (1ull << i))
5624                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5625         }
5626 }
5627
5628 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5629 {
5630         char buf[96];
5631         int i = 0;
5632
5633         dd_dev_info(dd, "PIO Error: %s\n",
5634                     pio_err_status_string(buf, sizeof(buf), reg));
5635
5636         if (reg & ALL_PIO_FREEZE_ERR)
5637                 start_freeze_handling(dd->pport, 0);
5638
5639         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5640                 if (reg & (1ull << i))
5641                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5642         }
5643 }
5644
5645 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5646 {
5647         char buf[96];
5648         int i = 0;
5649
5650         dd_dev_info(dd, "SDMA Error: %s\n",
5651                     sdma_err_status_string(buf, sizeof(buf), reg));
5652
5653         if (reg & ALL_SDMA_FREEZE_ERR)
5654                 start_freeze_handling(dd->pport, 0);
5655
5656         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5657                 if (reg & (1ull << i))
5658                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5659         }
5660 }
5661
5662 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5663 {
5664         incr_cntr64(&ppd->port_xmit_discards);
5665 }
5666
5667 static void count_port_inactive(struct hfi1_devdata *dd)
5668 {
5669         __count_port_discards(dd->pport);
5670 }
5671
5672 /*
5673  * We have had a "disallowed packet" error during egress. Determine the
5674  * integrity check which failed and update the relevant error counters.
5675  *
5676  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5677  * bit of state per integrity check, and so we can miss the reason for an
5678  * egress error if more than one packet fails the same integrity check
5679  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5680  */
5681 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5682                                         int vl)
5683 {
5684         struct hfi1_pportdata *ppd = dd->pport;
5685         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5686         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5687         char buf[96];
5688
5689         /* clear down all observed info as quickly as possible after read */
5690         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5691
5692         dd_dev_info(dd,
5693                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5694                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5695
5696         /* Eventually add other counters for each bit */
5697         if (info & PORT_DISCARD_EGRESS_ERRS) {
5698                 int weight, i;
5699
5700                 /*
5701                  * Count all applicable bits as individual errors and
5702                  * attribute them to the packet that triggered this handler.
5703                  * This may not be completely accurate due to limitations
5704                  * on the available hardware error information.  There is
5705                  * a single information register and any number of error
5706                  * packets may have occurred and contributed to it before
5707                  * this routine is called.  This means that:
5708                  * a) If multiple packets with the same error occur before
5709                  *    this routine is called, earlier packets are missed.
5710                  *    There is only a single bit for each error type.
5711                  * b) Errors may not be attributed to the correct VL.
5712                  *    The driver is attributing all bits in the info register
5713                  *    to the packet that triggered this call, but bits
5714                  *    could be an accumulation of different packets with
5715                  *    different VLs.
5716                  * c) A single error packet may have multiple counts attached
5717                  *    to it.  There is no way for the driver to know if
5718                  *    multiple bits set in the info register are due to a
5719                  *    single packet or multiple packets.  The driver assumes
5720                  *    multiple packets.
5721                  */
5722                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5723                 for (i = 0; i < weight; i++) {
5724                         __count_port_discards(ppd);
5725                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5726                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5727                         else if (vl == 15)
5728                                 incr_cntr64(&ppd->port_xmit_discards_vl
5729                                             [C_VL_15]);
5730                 }
5731         }
5732 }
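
/*
 * Worked example (illustrative): if info had three of the
 * PORT_DISCARD_EGRESS_ERRS bits set, hweight64() returns 3 and the loop
 * above increments port_xmit_discards three times, along with the per-VL
 * discard counter for the given VL (or C_VL_15 when vl == 15).  As the
 * comment above notes, this can over-count a single bad packet because the
 * hardware cannot say how many packets contributed to the info bits.
 */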
5733
5734 /*
5735  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5736  * register. Does it represent a 'port inactive' error?
5737  */
5738 static inline int port_inactive_err(u64 posn)
5739 {
5740         return (posn >= SEES(TX_LINKDOWN) &&
5741                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5742 }
5743
5744 /*
5745  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5746  * register. Does it represent a 'disallowed packet' error?
5747  */
5748 static inline int disallowed_pkt_err(int posn)
5749 {
5750         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5751                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5752 }
5753
5754 /*
5755  * Input value is a bit position of one of the SDMA engine disallowed
5756  * packet errors.  Return which engine.  Use of this must be guarded by
5757  * disallowed_pkt_err().
5758  */
5759 static inline int disallowed_pkt_engine(int posn)
5760 {
5761         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5762 }
5763
5764 /*
5765  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5766  * be done.
5767  */
5768 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5769 {
5770         struct sdma_vl_map *m;
5771         int vl;
5772
5773         /* range check */
5774         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5775                 return -1;
5776
5777         rcu_read_lock();
5778         m = rcu_dereference(dd->sdma_map);
5779         vl = m->engine_to_vl[engine];
5780         rcu_read_unlock();
5781
5782         return vl;
5783 }
5784
5785 /*
5786  * Translate the send context (software index) into a VL.  Return -1 if the
5787  * translation cannot be done.
5788  */
5789 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5790 {
5791         struct send_context_info *sci;
5792         struct send_context *sc;
5793         int i;
5794
5795         sci = &dd->send_contexts[sw_index];
5796
5797         /* there is no information for user (PSM) and ack contexts */
5798         if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5799                 return -1;
5800
5801         sc = sci->sc;
5802         if (!sc)
5803                 return -1;
5804         if (dd->vld[15].sc == sc)
5805                 return 15;
5806         for (i = 0; i < num_vls; i++)
5807                 if (dd->vld[i].sc == sc)
5808                         return i;
5809
5810         return -1;
5811 }
5812
5813 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5814 {
5815         u64 reg_copy = reg, handled = 0;
5816         char buf[96];
5817         int i = 0;
5818
5819         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5820                 start_freeze_handling(dd->pport, 0);
5821         else if (is_ax(dd) &&
5822                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5823                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5824                 start_freeze_handling(dd->pport, 0);
5825
5826         while (reg_copy) {
5827                 int posn = fls64(reg_copy);
5828                 /* fls64() returns a 1-based offset, we want it zero based */
5829                 int shift = posn - 1;
5830                 u64 mask = 1ULL << shift;
5831
5832                 if (port_inactive_err(shift)) {
5833                         count_port_inactive(dd);
5834                         handled |= mask;
5835                 } else if (disallowed_pkt_err(shift)) {
5836                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5837
5838                         handle_send_egress_err_info(dd, vl);
5839                         handled |= mask;
5840                 }
5841                 reg_copy &= ~mask;
5842         }
5843
5844         reg &= ~handled;
5845
5846         if (reg)
5847                 dd_dev_info(dd, "Egress Error: %s\n",
5848                             egress_err_status_string(buf, sizeof(buf), reg));
5849
5850         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5851                 if (reg & (1ull << i))
5852                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5853         }
5854 }
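
/*
 * Note on the loop above (descriptive only): fls64() returns the highest
 * set bit, so the errors are consumed from the most significant bit down.
 * Every recognized bit is accumulated into 'handled' and stripped from the
 * working copy, and only the leftover, unrecognized bits are printed at
 * the end.
 */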
5855
5856 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5857 {
5858         char buf[96];
5859         int i = 0;
5860
5861         dd_dev_info(dd, "Send Error: %s\n",
5862                     send_err_status_string(buf, sizeof(buf), reg));
5863
5864         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5865                 if (reg & (1ull << i))
5866                         incr_cntr64(&dd->send_err_status_cnt[i]);
5867         }
5868 }
5869
5870 /*
5871  * The maximum number of times the error clear down will loop before
5872  * blocking a repeating error.  This value is arbitrary.
5873  */
5874 #define MAX_CLEAR_COUNT 20
5875
5876 /*
5877  * Clear and handle an error register.  All error interrupts are funneled
5878  * through here to have a central location to correctly handle single-
5879  * or multi-shot errors.
5880  *
5881  * For non per-context registers, call this routine with a context value
5882  * of 0 so the per-context offset is zero.
5883  *
5884  * If the handler loops too many times, assume that something is wrong
5885  * and can't be fixed, so mask the error bits.
5886  */
5887 static void interrupt_clear_down(struct hfi1_devdata *dd,
5888                                  u32 context,
5889                                  const struct err_reg_info *eri)
5890 {
5891         u64 reg;
5892         u32 count;
5893
5894         /* read in a loop until no more errors are seen */
5895         count = 0;
5896         while (1) {
5897                 reg = read_kctxt_csr(dd, context, eri->status);
5898                 if (reg == 0)
5899                         break;
5900                 write_kctxt_csr(dd, context, eri->clear, reg);
5901                 if (likely(eri->handler))
5902                         eri->handler(dd, context, reg);
5903                 count++;
5904                 if (count > MAX_CLEAR_COUNT) {
5905                         u64 mask;
5906
5907                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5908                                    eri->desc, reg);
5909                         /*
5910                          * Read-modify-write so any other masked bits
5911                          * remain masked.
5912                          */
5913                         mask = read_kctxt_csr(dd, context, eri->mask);
5914                         mask &= ~reg;
5915                         write_kctxt_csr(dd, context, eri->mask, mask);
5916                         break;
5917                 }
5918         }
5919 }
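
/*
 * Sketch of the clear-down flow (descriptive only): read the status CSR,
 * write the value back to the clear CSR, run the handler, and repeat while
 * new error bits keep appearing.  After MAX_CLEAR_COUNT iterations the
 * still-firing bits are removed from the mask CSR so a stuck error cannot
 * cause an interrupt storm; the read-modify-write preserves any bits that
 * were already masked.
 */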
5920
5921 /*
5922  * CCE block "misc" interrupt.  Source is < 16.
5923  */
5924 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5925 {
5926         const struct err_reg_info *eri = &misc_errs[source];
5927
5928         if (eri->handler) {
5929                 interrupt_clear_down(dd, 0, eri);
5930         } else {
5931                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5932                            source);
5933         }
5934 }
5935
5936 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5937 {
5938         return flag_string(buf, buf_len, flags,
5939                            sc_err_status_flags,
5940                            ARRAY_SIZE(sc_err_status_flags));
5941 }
5942
5943 /*
5944  * Send context error interrupt.  Source (hw_context) is < 160.
5945  *
5946  * All send context errors cause the send context to halt.  The normal
5947  * clear-down mechanism cannot be used because we cannot clear the
5948  * error bits until several other long-running items are done first.
5949  * This is OK because with the context halted, nothing else is going
5950  * to happen on it anyway.
5951  */
5952 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5953                                 unsigned int hw_context)
5954 {
5955         struct send_context_info *sci;
5956         struct send_context *sc;
5957         char flags[96];
5958         u64 status;
5959         u32 sw_index;
5960         int i = 0;
5961         unsigned long irq_flags;
5962
5963         sw_index = dd->hw_to_sw[hw_context];
5964         if (sw_index >= dd->num_send_contexts) {
5965                 dd_dev_err(dd,
5966                            "out of range sw index %u for send context %u\n",
5967                            sw_index, hw_context);
5968                 return;
5969         }
5970         sci = &dd->send_contexts[sw_index];
5971         spin_lock_irqsave(&dd->sc_lock, irq_flags);
5972         sc = sci->sc;
5973         if (!sc) {
5974                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5975                            sw_index, hw_context);
5976                 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5977                 return;
5978         }
5979
5980         /* tell the software that a halt has begun */
5981         sc_stop(sc, SCF_HALTED);
5982
5983         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5984
5985         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5986                     send_context_err_status_string(flags, sizeof(flags),
5987                                                    status));
5988
5989         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5990                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5991
5992         /*
5993          * Automatically restart halted kernel contexts out of interrupt
5994          * context.  User contexts must ask the driver to restart the context.
5995          */
5996         if (sc->type != SC_USER)
5997                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5998         spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5999
6000         /*
6001          * Update the counters for the corresponding status bits.
6002          * Note that these particular counters are aggregated over all
6003          * 160 contexts.
6004          */
6005         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6006                 if (status & (1ull << i))
6007                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6008         }
6009 }
6010
6011 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6012                                 unsigned int source, u64 status)
6013 {
6014         struct sdma_engine *sde;
6015         int i = 0;
6016
6017         sde = &dd->per_sdma[source];
6018 #ifdef CONFIG_SDMA_VERBOSITY
6019         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6020                    slashstrip(__FILE__), __LINE__, __func__);
6021         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6022                    sde->this_idx, source, (unsigned long long)status);
6023 #endif
6024         sde->err_cnt++;
6025         sdma_engine_error(sde, status);
6026
6027         /*
6028          * Update the counters for the corresponding status bits.
6029          * Note that these particular counters are aggregated over
6030          * all 16 DMA engines.
6031         */
6032         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6033                 if (status & (1ull << i))
6034                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6035         }
6036 }
6037
6038 /*
6039  * CCE block SDMA error interrupt.  Source is < 16.
6040  */
6041 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6042 {
6043 #ifdef CONFIG_SDMA_VERBOSITY
6044         struct sdma_engine *sde = &dd->per_sdma[source];
6045
6046         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6047                    slashstrip(__FILE__), __LINE__, __func__);
6048         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6049                    source);
6050         sdma_dumpstate(sde);
6051 #endif
6052         interrupt_clear_down(dd, source, &sdma_eng_err);
6053 }
6054
6055 /*
6056  * CCE block "various" interrupt.  Source is < 8.
6057  */
6058 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6059 {
6060         const struct err_reg_info *eri = &various_err[source];
6061
6062         /*
6063          * TCritInt cannot go through interrupt_clear_down()
6064          * because it is not a second tier interrupt. The handler
6065          * should be called directly.
6066          */
6067         if (source == TCRIT_INT_SOURCE)
6068                 handle_temp_err(dd);
6069         else if (eri->handler)
6070                 interrupt_clear_down(dd, 0, eri);
6071         else
6072                 dd_dev_info(dd,
6073                             "%s: Unimplemented/reserved interrupt %d\n",
6074                             __func__, source);
6075 }
6076
6077 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6078 {
6079         /* src_ctx is always zero */
6080         struct hfi1_pportdata *ppd = dd->pport;
6081         unsigned long flags;
6082         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6083
6084         if (reg & QSFP_HFI0_MODPRST_N) {
6085                 if (!qsfp_mod_present(ppd)) {
6086                         dd_dev_info(dd, "%s: QSFP module removed\n",
6087                                     __func__);
6088
6089                         ppd->driver_link_ready = 0;
6090                         /*
6091                          * Cable removed, reset all our information about the
6092                          * cache and cable capabilities
6093                          */
6094
6095                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6096                         /*
6097                          * We don't set cache_refresh_required here as we expect
6098                          * an interrupt when a cable is inserted
6099                          */
6100                         ppd->qsfp_info.cache_valid = 0;
6101                         ppd->qsfp_info.reset_needed = 0;
6102                         ppd->qsfp_info.limiting_active = 0;
6103                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6104                                                flags);
6105                         /* Invert the ModPresent pin now to detect plug-in */
6106                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6107                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6108
6109                         if ((ppd->offline_disabled_reason >
6110                           HFI1_ODR_MASK(
6111                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6112                           (ppd->offline_disabled_reason ==
6113                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6114                                 ppd->offline_disabled_reason =
6115                                 HFI1_ODR_MASK(
6116                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6117
6118                         if (ppd->host_link_state == HLS_DN_POLL) {
6119                                 /*
6120                                  * The link is still in POLL. This means
6121                                  * that the normal link down processing
6122                                  * will not happen. We have to do it here
6123                                  * before turning the DC off.
6124                                  */
6125                                 queue_work(ppd->link_wq, &ppd->link_down_work);
6126                         }
6127                 } else {
6128                         dd_dev_info(dd, "%s: QSFP module inserted\n",
6129                                     __func__);
6130
6131                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6132                         ppd->qsfp_info.cache_valid = 0;
6133                         ppd->qsfp_info.cache_refresh_required = 1;
6134                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6135                                                flags);
6136
6137                         /*
6138                          * Stop inversion of ModPresent pin to detect
6139                          * removal of the cable
6140                          */
6141                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6142                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6143                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6144
6145                         ppd->offline_disabled_reason =
6146                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6147                 }
6148         }
6149
6150         if (reg & QSFP_HFI0_INT_N) {
6151                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6152                             __func__);
6153                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6154                 ppd->qsfp_info.check_interrupt_flags = 1;
6155                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6156         }
6157
6158         /* Schedule the QSFP work only if there is a cable attached. */
6159         if (qsfp_mod_present(ppd))
6160                 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6161 }
6162
6163 static int request_host_lcb_access(struct hfi1_devdata *dd)
6164 {
6165         int ret;
6166
6167         ret = do_8051_command(dd, HCMD_MISC,
6168                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6169                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6170         if (ret != HCMD_SUCCESS) {
6171                 dd_dev_err(dd, "%s: command failed with error %d\n",
6172                            __func__, ret);
6173         }
6174         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6175 }
6176
6177 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6178 {
6179         int ret;
6180
6181         ret = do_8051_command(dd, HCMD_MISC,
6182                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6183                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6184         if (ret != HCMD_SUCCESS) {
6185                 dd_dev_err(dd, "%s: command failed with error %d\n",
6186                            __func__, ret);
6187         }
6188         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6189 }
6190
6191 /*
6192  * Set the LCB selector - allow host access.  The DCC selector always
6193  * points to the host.
6194  */
6195 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6196 {
6197         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6198                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6199                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6200 }
6201
6202 /*
6203  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6204  * points to the host.
6205  */
6206 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6207 {
6208         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6209                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6210 }
6211
6212 /*
6213  * Acquire LCB access from the 8051.  If the host already has access,
6214  * just increment a counter.  Otherwise, inform the 8051 that the
6215  * host is taking access.
6216  *
6217  * Returns:
6218  *      0 on success
6219  *      -EBUSY if the 8051 has control and cannot be disturbed
6220  *      -errno if unable to acquire access from the 8051
6221  */
6222 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6223 {
6224         struct hfi1_pportdata *ppd = dd->pport;
6225         int ret = 0;
6226
6227         /*
6228          * Use the host link state lock so the operation of this routine
6229          * { link state check, selector change, count increment } can occur
6230          * as a unit against a link state change.  Otherwise there is a
6231          * race between the state change and the count increment.
6232          */
6233         if (sleep_ok) {
6234                 mutex_lock(&ppd->hls_lock);
6235         } else {
6236                 while (!mutex_trylock(&ppd->hls_lock))
6237                         udelay(1);
6238         }
6239
6240         /* this access is valid only when the link is up */
6241         if (ppd->host_link_state & HLS_DOWN) {
6242                 dd_dev_info(dd, "%s: link state %s not up\n",
6243                             __func__, link_state_name(ppd->host_link_state));
6244                 ret = -EBUSY;
6245                 goto done;
6246         }
6247
6248         if (dd->lcb_access_count == 0) {
6249                 ret = request_host_lcb_access(dd);
6250                 if (ret) {
6251                         dd_dev_err(dd,
6252                                    "%s: unable to acquire LCB access, err %d\n",
6253                                    __func__, ret);
6254                         goto done;
6255                 }
6256                 set_host_lcb_access(dd);
6257         }
6258         dd->lcb_access_count++;
6259 done:
6260         mutex_unlock(&ppd->hls_lock);
6261         return ret;
6262 }
6263
6264 /*
6265  * Release LCB access by decrementing the use count.  If the count is moving
6266  * from 1 to 0, inform 8051 that it has control back.
6267  *
6268  * Returns:
6269  *      0 on success
6270  *      -errno if unable to release access to the 8051
6271  */
6272 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6273 {
6274         int ret = 0;
6275
6276         /*
6277          * Use the host link state lock because the acquire needed it.
6278          * Here, we only need to keep { selector change, count decrement }
6279          * as a unit.
6280          */
6281         if (sleep_ok) {
6282                 mutex_lock(&dd->pport->hls_lock);
6283         } else {
6284                 while (!mutex_trylock(&dd->pport->hls_lock))
6285                         udelay(1);
6286         }
6287
6288         if (dd->lcb_access_count == 0) {
6289                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6290                            __func__);
6291                 goto done;
6292         }
6293
6294         if (dd->lcb_access_count == 1) {
6295                 set_8051_lcb_access(dd);
6296                 ret = request_8051_lcb_access(dd);
6297                 if (ret) {
6298                         dd_dev_err(dd,
6299                                    "%s: unable to release LCB access, err %d\n",
6300                                    __func__, ret);
6301                         /* restore host access if the grant didn't work */
6302                         set_host_lcb_access(dd);
6303                         goto done;
6304                 }
6305         }
6306         dd->lcb_access_count--;
6307 done:
6308         mutex_unlock(&dd->pport->hls_lock);
6309         return ret;
6310 }
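/*
 * Illustrative usage of the pair above (a sketch, not taken from a
 * specific caller): a direct LCB CSR read is bracketed by acquire and
 * release so the 8051 is not disturbed, e.g.
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 */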
6311
6312 /*
6313  * Initialize LCB access variables and state.  Called during driver load,
6314  * after most of the initialization is finished.
6315  *
6316  * The DC default is LCB access on for the host.  The driver defaults to
6317  * leaving access to the 8051.  Assign access now - this constrains the call
6318  * to this routine to be after all LCB set-up is done.  In particular, after
6319  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6320  */
6321 static void init_lcb_access(struct hfi1_devdata *dd)
6322 {
6323         dd->lcb_access_count = 0;
6324 }
6325
6326 /*
6327  * Write a response back to an 8051 request.
6328  */
6329 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6330 {
6331         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6332                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6333                   (u64)return_code <<
6334                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6335                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6336 }
6337
6338 /*
6339  * Handle host requests from the 8051.
6340  */
6341 static void handle_8051_request(struct hfi1_pportdata *ppd)
6342 {
6343         struct hfi1_devdata *dd = ppd->dd;
6344         u64 reg;
6345         u16 data = 0;
6346         u8 type;
6347
6348         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6349         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6350                 return; /* no request */
6351
6352         /* zero out COMPLETED so the response is seen */
6353         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6354
6355         /* extract request details */
6356         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6357                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6358         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6359                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6360
6361         switch (type) {
6362         case HREQ_LOAD_CONFIG:
6363         case HREQ_SAVE_CONFIG:
6364         case HREQ_READ_CONFIG:
6365         case HREQ_SET_TX_EQ_ABS:
6366         case HREQ_SET_TX_EQ_REL:
6367         case HREQ_ENABLE:
6368                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6369                             type);
6370                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6371                 break;
6372         case HREQ_LCB_RESET:
6373                 /* Put the LCB, RX FPE and TX FPE into reset */
6374                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6375                 /* Make sure the write completed */
6376                 (void)read_csr(dd, DCC_CFG_RESET);
6377                 /* Hold the reset long enough to take effect */
6378                 udelay(1);
6379                 /* Take the LCB, RX FPE and TX FPE out of reset */
6380                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6381                 hreq_response(dd, HREQ_SUCCESS, 0);
6382
6383                 break;
6384         case HREQ_CONFIG_DONE:
6385                 hreq_response(dd, HREQ_SUCCESS, 0);
6386                 break;
6387
6388         case HREQ_INTERFACE_TEST:
6389                 hreq_response(dd, HREQ_SUCCESS, data);
6390                 break;
6391         default:
6392                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6393                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6394                 break;
6395         }
6396 }
6397
6398 /*
6399  * Set up the allocation unit value.
6400  */
6401 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6402 {
6403         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6404
6405         /* do not modify other values in the register */
6406         reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6407         reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6408         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6409 }
6410
6411 /*
6412  * Set up initial VL15 credits of the remote.  Assumes the rest of
6413  * the CM credit registers are zero from a previous global or credit reset.
6414  * Shared limit for VL15 will always be 0.
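 *
 * handle_verify_cap() caches the peer's advertised value in
 * dd->vl15buf_cached; handle_link_up() then applies it through this
 * routine once the link-up interrupt arrives.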
6415  */
6416 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6417 {
6418         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6419
6420         /* set initial values for total and shared credit limit */
6421         reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6422                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6423
6424         /*
6425          * Set total limit to be equal to VL15 credits.
6426          * Leave shared limit at 0.
6427          */
6428         reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6429         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6430
6431         write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6432                   << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6433 }
6434
6435 /*
6436  * Zero all credit details from the previous connection and
6437  * reset the CM manager's internal counters.
6438  */
6439 void reset_link_credits(struct hfi1_devdata *dd)
6440 {
6441         int i;
6442
6443         /* remove all previous VL credit limits */
6444         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6445                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6446         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6447         write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6448         /* reset the CM block */
6449         pio_send_control(dd, PSC_CM_RESET);
6450         /* reset cached value */
6451         dd->vl15buf_cached = 0;
6452 }
6453
6454 /* convert a vCU to a CU */
6455 static u32 vcu_to_cu(u8 vcu)
6456 {
6457         return 1 << vcu;
6458 }
6459
6460 /* convert a CU to a vCU */
6461 static u8 cu_to_vcu(u32 cu)
6462 {
6463         return ilog2(cu);
6464 }
6465
6466 /* convert a vAU to an AU */
6467 static u32 vau_to_au(u8 vau)
6468 {
6469         return 8 * (1 << vau);
6470 }
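/*
 * Worked example from the definitions above: vau_to_au(0) == 8 and
 * vau_to_au(1) == 16, matching the AU == 8 and AU == 16 cases discussed
 * in handle_verify_cap(); cu_to_vcu() is the inverse of vcu_to_cu().
 */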
6471
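/* (Re)set the per-port SM trap QP and SA QP numbers to their link-up defaults. */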
6472 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6473 {
6474         ppd->sm_trap_qp = 0x0;
6475         ppd->sa_qp = 0x1;
6476 }
6477
6478 /*
6479  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6480  */
6481 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6482 {
6483         u64 reg;
6484
6485         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6486         write_csr(dd, DC_LCB_CFG_RUN, 0);
6487         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6488         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6489                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6490         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6491         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6492         reg = read_csr(dd, DCC_CFG_RESET);
6493         write_csr(dd, DCC_CFG_RESET, reg |
6494                   DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6495         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6496         if (!abort) {
6497                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6498                 write_csr(dd, DCC_CFG_RESET, reg);
6499                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6500         }
6501 }
6502
6503 /*
6504  * This routine should be called after the link has been transitioned to
6505  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6506  * reset).
6507  *
6508  * The expectation is that the caller of this routine would have taken
6509  * care of properly transitioning the link into the correct state.
6510  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6511  *       before calling this function.
6512  */
6513 static void _dc_shutdown(struct hfi1_devdata *dd)
6514 {
6515         lockdep_assert_held(&dd->dc8051_lock);
6516
6517         if (dd->dc_shutdown)
6518                 return;
6519
6520         dd->dc_shutdown = 1;
6521         /* Shutdown the LCB */
6522         lcb_shutdown(dd, 1);
6523         /*
6524          * Going to OFFLINE would have caused the 8051 to put the
6525          * SerDes into reset already. Just need to shut down the 8051
6526          * itself.
6527          */
6528         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6529 }
6530
6531 static void dc_shutdown(struct hfi1_devdata *dd)
6532 {
6533         mutex_lock(&dd->dc8051_lock);
6534         _dc_shutdown(dd);
6535         mutex_unlock(&dd->dc8051_lock);
6536 }
6537
6538 /*
6539  * Calling this after the DC has been brought out of reset should not
6540  * do any damage.
6541  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6542  *       before calling this function.
6543  */
6544 static void _dc_start(struct hfi1_devdata *dd)
6545 {
6546         lockdep_assert_held(&dd->dc8051_lock);
6547
6548         if (!dd->dc_shutdown)
6549                 return;
6550
6551         /* Take the 8051 out of reset */
6552         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6553         /* Wait until 8051 is ready */
6554         if (wait_fm_ready(dd, TIMEOUT_8051_START))
6555                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6556                            __func__);
6557
6558         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6559         write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6560         /* lcb_shutdown() with abort=1 does not restore these */
6561         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6562         dd->dc_shutdown = 0;
6563 }
6564
6565 static void dc_start(struct hfi1_devdata *dd)
6566 {
6567         mutex_lock(&dd->dc8051_lock);
6568         _dc_start(dd);
6569         mutex_unlock(&dd->dc8051_lock);
6570 }
6571
6572 /*
6573  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6574  */
6575 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6576 {
6577         u64 rx_radr, tx_radr;
6578         u32 version;
6579
6580         if (dd->icode != ICODE_FPGA_EMULATION)
6581                 return;
6582
6583         /*
6584          * These LCB defaults on emulator _s are good, nothing to do here:
6585          *      LCB_CFG_TX_FIFOS_RADR
6586          *      LCB_CFG_RX_FIFOS_RADR
6587          *      LCB_CFG_LN_DCLK
6588          *      LCB_CFG_IGNORE_LOST_RCLK
6589          */
6590         if (is_emulator_s(dd))
6591                 return;
6592         /* else this is _p */
6593
6594         version = emulator_rev(dd);
6595         if (!is_ax(dd))
6596                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6597
6598         if (version <= 0x12) {
6599                 /* release 0x12 and below */
6600
6601                 /*
6602                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6603                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6604                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6605                  */
6606                 rx_radr =
6607                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6608                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6609                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6610                 /*
6611                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6612                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6613                  */
6614                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6615         } else if (version <= 0x18) {
6616                 /* release 0x13 up to 0x18 */
6617                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6618                 rx_radr =
6619                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6620                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6621                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6622                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6623         } else if (version == 0x19) {
6624                 /* release 0x19 */
6625                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6626                 rx_radr =
6627                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6628                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6629                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6630                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6631         } else if (version == 0x1a) {
6632                 /* release 0x1a */
6633                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6634                 rx_radr =
6635                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6636                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6637                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6638                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6639                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6640         } else {
6641                 /* release 0x1b and higher */
6642                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6643                 rx_radr =
6644                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6645                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6646                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6647                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6648         }
6649
6650         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6651         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6652         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6653                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6654         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6655 }
6656
6657 /*
6658  * Handle a SMA idle message
6659  *
6660  * This is a work-queue function outside of the interrupt.
6661  */
6662 void handle_sma_message(struct work_struct *work)
6663 {
6664         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6665                                                         sma_message_work);
6666         struct hfi1_devdata *dd = ppd->dd;
6667         u64 msg;
6668         int ret;
6669
6670         /*
6671          * msg is bytes 1-4 of the 40-bit idle message - the command code
6672          * is stripped off
6673          */
6674         ret = read_idle_sma(dd, &msg);
6675         if (ret)
6676                 return;
6677         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6678         /*
6679          * React to the SMA message.  Byte[1] (0 for us) is the command.
6680          */
6681         switch (msg & 0xff) {
6682         case SMA_IDLE_ARM:
6683                 /*
6684                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6685                  * State Transitions
6686                  *
6687                  * Only expected in INIT or ARMED, discard otherwise.
6688                  */
6689                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6690                         ppd->neighbor_normal = 1;
6691                 break;
6692         case SMA_IDLE_ACTIVE:
6693                 /*
6694                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6695                  * State Transitions
6696                  *
6697                  * Can activate the node.  Discard otherwise.
6698                  */
6699                 if (ppd->host_link_state == HLS_UP_ARMED &&
6700                     ppd->is_active_optimize_enabled) {
6701                         ppd->neighbor_normal = 1;
6702                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6703                         if (ret)
6704                                 dd_dev_err(
6705                                         dd,
6706                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6707                                         __func__);
6708                 }
6709                 break;
6710         default:
6711                 dd_dev_err(dd,
6712                            "%s: received unexpected SMA idle message 0x%llx\n",
6713                            __func__, msg);
6714                 break;
6715         }
6716 }
6717
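/*
 * Atomically set and/or clear bits in RCV_CTRL under the rcvctrl lock.
 * add_rcvctrl()/clear_rcvctrl() below are thin wrappers, used e.g. to
 * toggle RCV_CTRL_RCV_PORT_ENABLE_SMASK in rxe_freeze() and
 * rxe_kernel_unfreeze().
 */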
6718 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6719 {
6720         u64 rcvctrl;
6721         unsigned long flags;
6722
6723         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6724         rcvctrl = read_csr(dd, RCV_CTRL);
6725         rcvctrl |= add;
6726         rcvctrl &= ~clear;
6727         write_csr(dd, RCV_CTRL, rcvctrl);
6728         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6729 }
6730
6731 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6732 {
6733         adjust_rcvctrl(dd, add, 0);
6734 }
6735
6736 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6737 {
6738         adjust_rcvctrl(dd, 0, clear);
6739 }
6740
6741 /*
6742  * Called from all interrupt handlers to start handling an SPC freeze.
6743  */
6744 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6745 {
6746         struct hfi1_devdata *dd = ppd->dd;
6747         struct send_context *sc;
6748         int i;
6749         int sc_flags;
6750
6751         if (flags & FREEZE_SELF)
6752                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6753
6754         /* enter frozen mode */
6755         dd->flags |= HFI1_FROZEN;
6756
6757         /* notify all SDMA engines that they are going into a freeze */
6758         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6759
6760         sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6761                                               SCF_LINK_DOWN : 0);
6762         /* do halt pre-handling on all enabled send contexts */
6763         for (i = 0; i < dd->num_send_contexts; i++) {
6764                 sc = dd->send_contexts[i].sc;
6765                 if (sc && (sc->flags & SCF_ENABLED))
6766                         sc_stop(sc, sc_flags);
6767         }
6768
6769         /* Send contexts are frozen. Notify user space */
6770         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6771
6772         if (flags & FREEZE_ABORT) {
6773                 dd_dev_err(dd,
6774                            "Aborted freeze recovery. Please REBOOT system\n");
6775                 return;
6776         }
6777         /* queue non-interrupt handler */
6778         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6779 }
6780
6781 /*
6782  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6783  * depending on the "freeze" parameter.
6784  *
6785  * No need to return an error if it times out, our only option
6786  * is to proceed anyway.
6787  */
6788 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6789 {
6790         unsigned long timeout;
6791         u64 reg;
6792
6793         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6794         while (1) {
6795                 reg = read_csr(dd, CCE_STATUS);
6796                 if (freeze) {
6797                         /* waiting until all indicators are set */
6798                         if ((reg & ALL_FROZE) == ALL_FROZE)
6799                                 return; /* all done */
6800                 } else {
6801                         /* waiting until all indicators are clear */
6802                         if ((reg & ALL_FROZE) == 0)
6803                                 return; /* all done */
6804                 }
6805
6806                 if (time_after(jiffies, timeout)) {
6807                         dd_dev_err(dd,
6808                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6809                                    freeze ? "" : "un", reg & ALL_FROZE,
6810                                    freeze ? ALL_FROZE : 0ull);
6811                         return;
6812                 }
6813                 usleep_range(80, 120);
6814         }
6815 }
6816
6817 /*
6818  * Do all freeze handling for the RXE block.
6819  */
6820 static void rxe_freeze(struct hfi1_devdata *dd)
6821 {
6822         int i;
6823         struct hfi1_ctxtdata *rcd;
6824
6825         /* disable port */
6826         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6827
6828         /* disable all receive contexts */
6829         for (i = 0; i < dd->num_rcv_contexts; i++) {
6830                 rcd = hfi1_rcd_get_by_index(dd, i);
6831                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6832                 hfi1_rcd_put(rcd);
6833         }
6834 }
6835
6836 /*
6837  * Unfreeze handling for the RXE block - kernel contexts only.
6838  * This will also enable the port.  User contexts will do unfreeze
6839  * handling on a per-context basis as they call into the driver.
6840  *
6841  */
6842 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6843 {
6844         u32 rcvmask;
6845         u16 i;
6846         struct hfi1_ctxtdata *rcd;
6847
6848         /* enable all kernel contexts */
6849         for (i = 0; i < dd->num_rcv_contexts; i++) {
6850                 rcd = hfi1_rcd_get_by_index(dd, i);
6851
6852                 /* Ensure all non-user contexts (including vnic) are enabled */
6853                 if (!rcd ||
6854                     (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6855                         hfi1_rcd_put(rcd);
6856                         continue;
6857                 }
6858                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6859                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6860                 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6861                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6862                 hfi1_rcvctrl(dd, rcvmask, rcd);
6863                 hfi1_rcd_put(rcd);
6864         }
6865
6866         /* enable port */
6867         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6868 }
6869
6870 /*
6871  * Non-interrupt SPC freeze handling.
6872  *
6873  * This is a work-queue function outside of the triggering interrupt.
6874  */
6875 void handle_freeze(struct work_struct *work)
6876 {
6877         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6878                                                                 freeze_work);
6879         struct hfi1_devdata *dd = ppd->dd;
6880
6881         /* wait for freeze indicators on all affected blocks */
6882         wait_for_freeze_status(dd, 1);
6883
6884         /* SPC is now frozen */
6885
6886         /* do send PIO freeze steps */
6887         pio_freeze(dd);
6888
6889         /* do send DMA freeze steps */
6890         sdma_freeze(dd);
6891
6892         /* do send egress freeze steps - nothing to do */
6893
6894         /* do receive freeze steps */
6895         rxe_freeze(dd);
6896
6897         /*
6898          * Unfreeze the hardware - clear the freeze, wait for each
6899          * block's frozen bit to clear, then clear the frozen flag.
6900          */
6901         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6902         wait_for_freeze_status(dd, 0);
6903
6904         if (is_ax(dd)) {
6905                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6906                 wait_for_freeze_status(dd, 1);
6907                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6908                 wait_for_freeze_status(dd, 0);
6909         }
6910
6911         /* do send PIO unfreeze steps for kernel contexts */
6912         pio_kernel_unfreeze(dd);
6913
6914         /* do send DMA unfreeze steps */
6915         sdma_unfreeze(dd);
6916
6917         /* do send egress unfreeze steps - nothing to do */
6918
6919         /* do receive unfreeze steps for kernel contexts */
6920         rxe_kernel_unfreeze(dd);
6921
6922         /*
6923          * The unfreeze procedure touches global device registers when
6924          * it disables and re-enables RXE. Mark the device unfrozen
6925          * after all that is done so other parts of the driver waiting
6926          * for the device to unfreeze don't do things out of order.
6927          *
6928          * The above implies that the meaning of the HFI1_FROZEN flag is
6929          * "Device has gone into freeze mode and freeze mode handling
6930          * is still in progress."
6931          *
6932          * The flag will be removed when freeze mode processing has
6933          * completed.
6934          */
6935         dd->flags &= ~HFI1_FROZEN;
6936         wake_up(&dd->event_queue);
6937
6938         /* no longer frozen */
6939 }
6940
6941 /**
6942  * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6943  * counters.
6944  * @ppd: info of physical Hfi port
6945  * @link_width: new link width after link up or downgrade
6946  *
6947  * Update the PortXmitWait and PortVlXmitWait counters after
6948  * a link up or downgrade event to reflect a link width change.
6949  */
6950 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6951 {
6952         int i;
6953         u16 tx_width;
6954         u16 link_speed;
6955
6956         tx_width = tx_link_width(link_width);
6957         link_speed = get_link_speed(ppd->link_speed_active);
6958
6959         /*
6960          * There are C_VL_COUNT PortVLXmitWait counters.
6961          * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
6962          */
6963         for (i = 0; i < C_VL_COUNT + 1; i++)
6964                 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6965 }
6966
6967 /*
6968  * Handle a link up interrupt from the 8051.
6969  *
6970  * This is a work-queue function outside of the interrupt.
6971  */
6972 void handle_link_up(struct work_struct *work)
6973 {
6974         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6975                                                   link_up_work);
6976         struct hfi1_devdata *dd = ppd->dd;
6977
6978         set_link_state(ppd, HLS_UP_INIT);
6979
6980         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6981         read_ltp_rtt(dd);
6982         /*
6983          * OPA specifies that certain counters are cleared on a transition
6984          * to link up, so do that.
6985          */
6986         clear_linkup_counters(dd);
6987         /*
6988          * And (re)set link up default values.
6989          */
6990         set_linkup_defaults(ppd);
6991
6992         /*
6993          * Set VL15 credits. Use cached value from verify cap interrupt.
6994          * In case of quick linkup or simulator, vl15 value will be set by
6995          * handle_linkup_change. VerifyCap interrupt handler will not be
6996          * called in those scenarios.
6997          */
6998         if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6999                 set_up_vl15(dd, dd->vl15buf_cached);
7000
7001         /* enforce link speed enabled */
7002         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7003                 /* oops - current speed is not enabled, bounce */
7004                 dd_dev_err(dd,
7005                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7006                            ppd->link_speed_active, ppd->link_speed_enabled);
7007                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7008                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
7009                 set_link_state(ppd, HLS_DN_OFFLINE);
7010                 start_link(ppd);
7011         }
7012 }
7013
7014 /*
7015  * Several pieces of LNI information were cached for SMA in ppd.
7016  * Reset these on link down
7017  */
7018 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7019 {
7020         ppd->neighbor_guid = 0;
7021         ppd->neighbor_port_number = 0;
7022         ppd->neighbor_type = 0;
7023         ppd->neighbor_fm_security = 0;
7024 }
7025
7026 static const char * const link_down_reason_strs[] = {
7027         [OPA_LINKDOWN_REASON_NONE] = "None",
7028         [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7029         [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7030         [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7031         [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7032         [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7033         [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7034         [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7035         [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7036         [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7037         [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7038         [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7039         [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7040         [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7041         [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7042         [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7043         [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7044         [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7045         [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7046         [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7047         [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7048         [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7049         [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7050         [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7051         [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7052         [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7053         [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7054         [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7055         [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7056         [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7057         [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7058         [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7059         [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7060                                         "Excessive buffer overrun",
7061         [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7062         [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7063         [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7064         [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7065         [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7066         [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7067         [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7068         [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7069                                         "Local media not installed",
7070         [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7071         [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7072         [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7073                                         "End to end not installed",
7074         [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7075         [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7076         [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7077         [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7078         [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7079         [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7080 };
7081
7082 /* return the neighbor link down reason string */
7083 static const char *link_down_reason_str(u8 reason)
7084 {
7085         const char *str = NULL;
7086
7087         if (reason < ARRAY_SIZE(link_down_reason_strs))
7088                 str = link_down_reason_strs[reason];
7089         if (!str)
7090                 str = "(invalid)";
7091
7092         return str;
7093 }
7094
7095 /*
7096  * Handle a link down interrupt from the 8051.
7097  *
7098  * This is a work-queue function outside of the interrupt.
7099  */
7100 void handle_link_down(struct work_struct *work)
7101 {
7102         u8 lcl_reason, neigh_reason = 0;
7103         u8 link_down_reason;
7104         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7105                                                   link_down_work);
7106         int was_up;
7107         static const char ldr_str[] = "Link down reason: ";
7108
7109         if ((ppd->host_link_state &
7110              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7111              ppd->port_type == PORT_TYPE_FIXED)
7112                 ppd->offline_disabled_reason =
7113                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7114
7115         /* Go offline first, then deal with reading/writing through 8051 */
7116         was_up = !!(ppd->host_link_state & HLS_UP);
7117         set_link_state(ppd, HLS_DN_OFFLINE);
7118         xchg(&ppd->is_link_down_queued, 0);
7119
7120         if (was_up) {
7121                 lcl_reason = 0;
7122                 /* link down reason is only valid if the link was up */
7123                 read_link_down_reason(ppd->dd, &link_down_reason);
7124                 switch (link_down_reason) {
7125                 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7126                         /* the link went down, no idle message reason */
7127                         dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7128                                     ldr_str);
7129                         break;
7130                 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7131                         /*
7132                          * The neighbor reason is only valid if an idle message
7133                          * was received for it.
7134                          */
7135                         read_planned_down_reason_code(ppd->dd, &neigh_reason);
7136                         dd_dev_info(ppd->dd,
7137                                     "%sNeighbor link down message %d, %s\n",
7138                                     ldr_str, neigh_reason,
7139                                     link_down_reason_str(neigh_reason));
7140                         break;
7141                 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7142                         dd_dev_info(ppd->dd,
7143                                     "%sHost requested link to go offline\n",
7144                                     ldr_str);
7145                         break;
7146                 default:
7147                         dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7148                                     ldr_str, link_down_reason);
7149                         break;
7150                 }
7151
7152                 /*
7153                  * If no reason, assume peer-initiated but missed
7154                  * LinkGoingDown idle flits.
7155                  */
7156                 if (neigh_reason == 0)
7157                         lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7158         } else {
7159                 /* went down while polling or going up */
7160                 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7161         }
7162
7163         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7164
7165         /* inform the SMA when the link transitions from up to down */
7166         if (was_up && ppd->local_link_down_reason.sma == 0 &&
7167             ppd->neigh_link_down_reason.sma == 0) {
7168                 ppd->local_link_down_reason.sma =
7169                                         ppd->local_link_down_reason.latest;
7170                 ppd->neigh_link_down_reason.sma =
7171                                         ppd->neigh_link_down_reason.latest;
7172         }
7173
7174         reset_neighbor_info(ppd);
7175
7176         /* disable the port */
7177         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7178
7179         /*
7180          * If there is no cable attached, turn the DC off. Otherwise,
7181          * start the link bring up.
7182          */
7183         if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7184                 dc_shutdown(ppd->dd);
7185         else
7186                 start_link(ppd);
7187 }
7188
7189 void handle_link_bounce(struct work_struct *work)
7190 {
7191         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7192                                                         link_bounce_work);
7193
7194         /*
7195          * Only do something if the link is currently up.
7196          */
7197         if (ppd->host_link_state & HLS_UP) {
7198                 set_link_state(ppd, HLS_DN_OFFLINE);
7199                 start_link(ppd);
7200         } else {
7201                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7202                             __func__, link_state_name(ppd->host_link_state));
7203         }
7204 }
7205
7206 /*
7207  * Mask conversion: Capability exchange to Port LTP.  The capability
7208  * exchange has an implicit 16b CRC that is mandatory.
7209  */
7210 static int cap_to_port_ltp(int cap)
7211 {
7212         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7213
7214         if (cap & CAP_CRC_14B)
7215                 port_ltp |= PORT_LTP_CRC_MODE_14;
7216         if (cap & CAP_CRC_48B)
7217                 port_ltp |= PORT_LTP_CRC_MODE_48;
7218         if (cap & CAP_CRC_12B_16B_PER_LANE)
7219                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7220
7221         return port_ltp;
7222 }
7223
7224 /*
7225  * Convert an OPA Port LTP mask to capability mask
7226  */
7227 int port_ltp_to_cap(int port_ltp)
7228 {
7229         int cap_mask = 0;
7230
7231         if (port_ltp & PORT_LTP_CRC_MODE_14)
7232                 cap_mask |= CAP_CRC_14B;
7233         if (port_ltp & PORT_LTP_CRC_MODE_48)
7234                 cap_mask |= CAP_CRC_48B;
7235         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7236                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7237
7238         return cap_mask;
7239 }
7240
7241 /*
7242  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7243  */
7244 static int lcb_to_port_ltp(int lcb_crc)
7245 {
7246         int port_ltp = 0;
7247
7248         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7249                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7250         else if (lcb_crc == LCB_CRC_48B)
7251                 port_ltp = PORT_LTP_CRC_MODE_48;
7252         else if (lcb_crc == LCB_CRC_14B)
7253                 port_ltp = PORT_LTP_CRC_MODE_14;
7254         else
7255                 port_ltp = PORT_LTP_CRC_MODE_16;
7256
7257         return port_ltp;
7258 }
7259
7260 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7261 {
7262         if (ppd->pkeys[2] != 0) {
7263                 ppd->pkeys[2] = 0;
7264                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7265                 hfi1_event_pkey_change(ppd->dd, ppd->port);
7266         }
7267 }
7268
7269 /*
7270  * Convert the given link width to the OPA link width bitmask.
7271  */
7272 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7273 {
7274         switch (width) {
7275         case 0:
7276                 /*
7277                  * Simulator and quick linkup do not set the width.
7278                  * Just set it to 4x without complaint.
7279                  */
7280                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7281                         return OPA_LINK_WIDTH_4X;
7282                 return 0; /* no lanes up */
7283         case 1: return OPA_LINK_WIDTH_1X;
7284         case 2: return OPA_LINK_WIDTH_2X;
7285         case 3: return OPA_LINK_WIDTH_3X;
7286         default:
7287                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7288                             __func__, width);
7289                 /* fall through */
7290         case 4: return OPA_LINK_WIDTH_4X;
7291         }
7292 }
7293
7294 /*
7295  * Do a population count on the bottom nibble.
7296  */
7297 static const u8 bit_counts[16] = {
7298         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7299 };
7300
7301 static inline u8 nibble_to_count(u8 nibble)
7302 {
7303         return bit_counts[nibble & 0xf];
7304 }
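/* e.g. nibble_to_count(0xf) == 4: all four lanes in the nibble are active */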
7305
7306 /*
7307  * Read the active lane information from the 8051 registers and return
7308  * their widths.
7309  *
7310  * Active lane information is found in these 8051 registers:
7311  *      enable_lane_tx
7312  *      enable_lane_rx
7313  */
7314 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7315                             u16 *rx_width)
7316 {
7317         u16 tx, rx;
7318         u8 enable_lane_rx;
7319         u8 enable_lane_tx;
7320         u8 tx_polarity_inversion;
7321         u8 rx_polarity_inversion;
7322         u8 max_rate;
7323
7324         /* read the active lanes */
7325         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7326                          &rx_polarity_inversion, &max_rate);
7327         read_local_lni(dd, &enable_lane_rx);
7328
7329         /* convert to counts */
7330         tx = nibble_to_count(enable_lane_tx);
7331         rx = nibble_to_count(enable_lane_rx);
7332
7333         /*
7334          * Set link_speed_active here, overriding what was set in
7335          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7336          * set the max_rate field in handle_verify_cap until v0.19.
7337          */
7338         if ((dd->icode == ICODE_RTL_SILICON) &&
7339             (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7340                 /* max_rate: 0 = 12.5G, 1 = 25G */
7341                 switch (max_rate) {
7342                 case 0:
7343                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7344                         break;
7345                 default:
7346                         dd_dev_err(dd,
7347                                    "%s: unexpected max rate %d, using 25Gb\n",
7348                                    __func__, (int)max_rate);
7349                         /* fall through */
7350                 case 1:
7351                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7352                         break;
7353                 }
7354         }
7355
7356         dd_dev_info(dd,
7357                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7358                     enable_lane_tx, tx, enable_lane_rx, rx);
7359         *tx_width = link_width_to_bits(dd, tx);
7360         *rx_width = link_width_to_bits(dd, rx);
7361 }
7362
7363 /*
7364  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7365  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7366  * after link up.  I.e. look elsewhere for downgrade information.
7367  *
7368  * Bits are:
7369  *      + bits [7:4] contain the number of active transmitters
7370  *      + bits [3:0] contain the number of active receivers
7371  * These are numbers 1 through 4 and can be different values if the
7372  * link is asymmetric.
7373  *
7374  * verify_cap_local_fm_link_width[0] retains its original value.
7375  */
7376 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7377                               u16 *rx_width)
7378 {
7379         u16 widths, tx, rx;
7380         u8 misc_bits, local_flags;
7381         u16 active_tx, active_rx;
7382
7383         read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7384         tx = widths >> 12;
7385         rx = (widths >> 8) & 0xf;
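        /* link_width[1] is the high byte of widths: [15:12] = tx count, [11:8] = rx count */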
7386
7387         *tx_width = link_width_to_bits(dd, tx);
7388         *rx_width = link_width_to_bits(dd, rx);
7389
7390         /* print the active widths */
7391         get_link_widths(dd, &active_tx, &active_rx);
7392 }
7393
7394 /*
7395  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7396  * hardware information when the link first comes up.
7397  *
7398  * The link width is not available until after VerifyCap.AllFramesReceived
7399  * (the trigger for handle_verify_cap), so this is outside that routine
7400  * and should be called when the 8051 signals linkup.
7401  */
7402 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7403 {
7404         u16 tx_width, rx_width;
7405
7406         /* get end-of-LNI link widths */
7407         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7408
7409         /* use tx_width as the link is supposed to be symmetric on link up */
7410         ppd->link_width_active = tx_width;
7411         /* link width downgrade active (LWD.A) starts out matching LW.A */
7412         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7413         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7414         /* per OPA spec, on link up LWD.E resets to LWD.S */
7415         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7416         /* cache the active egress rate (units [10^6 bits/sec]) */
7417         ppd->current_egress_rate = active_egress_rate(ppd);
7418 }
7419
7420 /*
7421  * Handle a verify capabilities interrupt from the 8051.
7422  *
7423  * This is a work-queue function outside of the interrupt.
7424  */
7425 void handle_verify_cap(struct work_struct *work)
7426 {
7427         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7428                                                                 link_vc_work);
7429         struct hfi1_devdata *dd = ppd->dd;
7430         u64 reg;
7431         u8 power_management;
7432         u8 continuous;
7433         u8 vcu;
7434         u8 vau;
7435         u8 z;
7436         u16 vl15buf;
7437         u16 link_widths;
7438         u16 crc_mask;
7439         u16 crc_val;
7440         u16 device_id;
7441         u16 active_tx, active_rx;
7442         u8 partner_supported_crc;
7443         u8 remote_tx_rate;
7444         u8 device_rev;
7445
7446         set_link_state(ppd, HLS_VERIFY_CAP);
7447
7448         lcb_shutdown(dd, 0);
7449         adjust_lcb_for_fpga_serdes(dd);
7450
7451         read_vc_remote_phy(dd, &power_management, &continuous);
7452         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7453                               &partner_supported_crc);
7454         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7455         read_remote_device_id(dd, &device_id, &device_rev);
7456
7457         /* print the active widths */
7458         get_link_widths(dd, &active_tx, &active_rx);
7459         dd_dev_info(dd,
7460                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7461                     (int)power_management, (int)continuous);
7462         dd_dev_info(dd,
7463                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7464                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7465                     (int)partner_supported_crc);
7466         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7467                     (u32)remote_tx_rate, (u32)link_widths);
7468         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7469                     (u32)device_id, (u32)device_rev);
7470         /*
7471          * The peer vAU value just read is the peer receiver value.  HFI does
7472          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7473          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7474          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7475          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7476          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7477          * subject to the Z value exception.
7478          */
7479         if (vau == 0)
7480                 vau = 1;
7481         set_up_vau(dd, vau);
7482
7483         /*
7484          * Set VL15 credits to 0 in global credit register. Cache remote VL15
7485          * credits value and wait for the link-up interrupt to set it.
7486          */
7487         set_up_vl15(dd, 0);
7488         dd->vl15buf_cached = vl15buf;
7489
7490         /* set up the LCB CRC mode */
7491         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7492
7493         /* order is important: use the lowest bit in common */
7494         if (crc_mask & CAP_CRC_14B)
7495                 crc_val = LCB_CRC_14B;
7496         else if (crc_mask & CAP_CRC_48B)
7497                 crc_val = LCB_CRC_48B;
7498         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7499                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7500         else
7501                 crc_val = LCB_CRC_16B;
7502
7503         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7504         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7505                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7506
7507         /* set (14b only) or clear sideband credit */
7508         reg = read_csr(dd, SEND_CM_CTRL);
7509         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7510                 write_csr(dd, SEND_CM_CTRL,
7511                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7512         } else {
7513                 write_csr(dd, SEND_CM_CTRL,
7514                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7515         }
7516
7517         ppd->link_speed_active = 0;     /* invalid value */
7518         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7519                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7520                 switch (remote_tx_rate) {
7521                 case 0:
7522                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7523                         break;
7524                 case 1:
7525                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7526                         break;
7527                 }
7528         } else {
7529                 /* actual rate is highest bit of the ANDed rates */
7530                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7531
7532                 if (rate & 2)
7533                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7534                 else if (rate & 1)
7535                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7536         }
7537         if (ppd->link_speed_active == 0) {
7538                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7539                            __func__, (int)remote_tx_rate);
7540                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7541         }
7542
7543         /*
7544          * Cache the values of the supported, enabled, and active
7545          * LTP CRC modes to return in 'portinfo' queries. But the bit
7546          * flags that are returned in the portinfo query differ from
7547          * what's in the link_crc_mask, crc_sizes, and crc_val
7548          * variables. Convert these here.
7549          */
7550         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7551                 /* supported crc modes */
7552         ppd->port_ltp_crc_mode |=
7553                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7554                 /* enabled crc modes */
7555         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7556                 /* active crc mode */
7557
7558         /* set up the remote credit return table */
7559         assign_remote_cm_au_table(dd, vcu);
7560
7561         /*
7562          * The LCB is reset on entry to handle_verify_cap(), so this must
7563          * be applied on every link up.
7564          *
7565          * Adjust LCB error kill enable to kill the link if
7566          * these RBUF errors are seen:
7567          *      REPLAY_BUF_MBE_SMASK
7568          *      FLIT_INPUT_BUF_MBE_SMASK
7569          */
7570         if (is_ax(dd)) {                        /* fixed in B0 */
7571                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7572                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7573                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7574                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7575         }
7576
7577         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7578         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7579
7580         /* give 8051 access to the LCB CSRs */
7581         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7582         set_8051_lcb_access(dd);
7583
7584         /* tell the 8051 to go to LinkUp */
7585         set_link_state(ppd, HLS_GOING_UP);
7586 }
7587
7588 /**
7589  * apply_link_downgrade_policy - Apply the link width downgrade enabled
7590  * policy against the current active link widths.
7591  * @ppd: info of physical Hfi port
7592  * @refresh_widths: True indicates link downgrade event
7593  * @return: True indicates a successful link downgrade. False indicates
7594  *          link downgrade event failed and the link will bounce back to
7595  *          default link width.
7596  *
7597  * Called when the enabled policy changes or the active link widths
7598  * change.
7599  * Refresh_widths indicates that a link downgrade occurred. The
7600  * link_downgraded variable is set by refresh_widths and
7601  * determines the success/failure of the policy application.
7602  */
7603 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7604                                  bool refresh_widths)
7605 {
7606         int do_bounce = 0;
7607         int tries;
7608         u16 lwde;
7609         u16 tx, rx;
7610         bool link_downgraded = refresh_widths;
7611
7612         /* use the hls lock to avoid a race with actual link up */
7613         tries = 0;
7614 retry:
7615         mutex_lock(&ppd->hls_lock);
7616         /* only apply if the link is up */
7617         if (ppd->host_link_state & HLS_DOWN) {
7618                 /* still going up... wait and retry */
7619                 if (ppd->host_link_state & HLS_GOING_UP) {
7620                         if (++tries < 1000) {
7621                                 mutex_unlock(&ppd->hls_lock);
7622                                 usleep_range(100, 120); /* arbitrary */
7623                                 goto retry;
7624                         }
7625                         dd_dev_err(ppd->dd,
7626                                    "%s: giving up waiting for link state change\n",
7627                                    __func__);
7628                 }
7629                 goto done;
7630         }
7631
7632         lwde = ppd->link_width_downgrade_enabled;
7633
7634         if (refresh_widths) {
7635                 get_link_widths(ppd->dd, &tx, &rx);
7636                 ppd->link_width_downgrade_tx_active = tx;
7637                 ppd->link_width_downgrade_rx_active = rx;
7638         }
7639
7640         if (ppd->link_width_downgrade_tx_active == 0 ||
7641             ppd->link_width_downgrade_rx_active == 0) {
7642                 /* the 8051 reported a dead link as a downgrade */
7643                 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7644                 link_downgraded = false;
7645         } else if (lwde == 0) {
7646                 /* downgrade is disabled */
7647
7648                 /* bounce if not at starting active width */
7649                 if ((ppd->link_width_active !=
7650                      ppd->link_width_downgrade_tx_active) ||
7651                     (ppd->link_width_active !=
7652                      ppd->link_width_downgrade_rx_active)) {
7653                         dd_dev_err(ppd->dd,
7654                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7655                         dd_dev_err(ppd->dd,
7656                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7657                                    ppd->link_width_active,
7658                                    ppd->link_width_downgrade_tx_active,
7659                                    ppd->link_width_downgrade_rx_active);
7660                         do_bounce = 1;
7661                         link_downgraded = false;
7662                 }
7663         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7664                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7665                 /* Tx or Rx is outside the enabled policy */
7666                 dd_dev_err(ppd->dd,
7667                            "Link is outside of downgrade allowed, downing link\n");
7668                 dd_dev_err(ppd->dd,
7669                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7670                            lwde, ppd->link_width_downgrade_tx_active,
7671                            ppd->link_width_downgrade_rx_active);
7672                 do_bounce = 1;
7673                 link_downgraded = false;
7674         }
7675
7676 done:
7677         mutex_unlock(&ppd->hls_lock);
7678
7679         if (do_bounce) {
7680                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7681                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7682                 set_link_state(ppd, HLS_DN_OFFLINE);
7683                 start_link(ppd);
7684         }
7685
7686         return link_downgraded;
7687 }
7688
7689 /*
7690  * Handle a link downgrade interrupt from the 8051.
7691  *
7692  * This is a work-queue function outside of the interrupt.
7693  */
7694 void handle_link_downgrade(struct work_struct *work)
7695 {
7696         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7697                                                         link_downgrade_work);
7698
7699         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7700         if (apply_link_downgrade_policy(ppd, true))
7701                 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7702 }
7703
7704 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7705 {
7706         return flag_string(buf, buf_len, flags, dcc_err_flags,
7707                 ARRAY_SIZE(dcc_err_flags));
7708 }
7709
7710 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7711 {
7712         return flag_string(buf, buf_len, flags, lcb_err_flags,
7713                 ARRAY_SIZE(lcb_err_flags));
7714 }
7715
7716 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7717 {
7718         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7719                 ARRAY_SIZE(dc8051_err_flags));
7720 }
7721
7722 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7723 {
7724         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7725                 ARRAY_SIZE(dc8051_info_err_flags));
7726 }
7727
7728 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7729 {
7730         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7731                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7732 }
7733
7734 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7735 {
7736         struct hfi1_pportdata *ppd = dd->pport;
7737         u64 info, err, host_msg;
7738         int queue_link_down = 0;
7739         char buf[96];
7740
7741         /* look at the flags */
7742         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7743                 /* 8051 information set by firmware */
7744                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7745                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7746                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7747                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7748                 host_msg = (info >>
7749                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7750                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7751
7752                 /*
7753                  * Handle error flags.
7754                  */
7755                 if (err & FAILED_LNI) {
7756                         /*
7757                          * LNI error indications are cleared by the 8051
7758                          * only when starting polling.  Only pay attention
7759                          * to them when in the states that occur during
7760                          * LNI.
7761                          */
7762                         if (ppd->host_link_state
7763                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7764                                 queue_link_down = 1;
7765                                 dd_dev_info(dd, "Link error: %s\n",
7766                                             dc8051_info_err_string(buf,
7767                                                                    sizeof(buf),
7768                                                                    err &
7769                                                                    FAILED_LNI));
7770                         }
7771                         err &= ~(u64)FAILED_LNI;
7772                 }
7773                 /* unknown frames can happen during LNI, just count */
7774                 if (err & UNKNOWN_FRAME) {
7775                         ppd->unknown_frame_count++;
7776                         err &= ~(u64)UNKNOWN_FRAME;
7777                 }
7778                 if (err) {
7779                         /* report remaining errors, but do not do anything */
7780                         dd_dev_err(dd, "8051 info error: %s\n",
7781                                    dc8051_info_err_string(buf, sizeof(buf),
7782                                                           err));
7783                 }
7784
7785                 /*
7786                  * Handle host message flags.
7787                  */
7788                 if (host_msg & HOST_REQ_DONE) {
7789                         /*
7790                          * Presently, the driver does a busy wait for
7791                          * host requests to complete.  This is only an
7792                          * informational message.
7793                          * NOTE: The 8051 clears the host message
7794                          * information *on the next 8051 command*.
7795                          * Therefore, when linkup is achieved,
7796                          * this flag will still be set.
7797                          */
7798                         host_msg &= ~(u64)HOST_REQ_DONE;
7799                 }
7800                 if (host_msg & BC_SMA_MSG) {
7801                         queue_work(ppd->link_wq, &ppd->sma_message_work);
7802                         host_msg &= ~(u64)BC_SMA_MSG;
7803                 }
7804                 if (host_msg & LINKUP_ACHIEVED) {
7805                         dd_dev_info(dd, "8051: Link up\n");
7806                         queue_work(ppd->link_wq, &ppd->link_up_work);
7807                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7808                 }
7809                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7810                         handle_8051_request(ppd);
7811                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7812                 }
7813                 if (host_msg & VERIFY_CAP_FRAME) {
7814                         queue_work(ppd->link_wq, &ppd->link_vc_work);
7815                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7816                 }
7817                 if (host_msg & LINK_GOING_DOWN) {
7818                         const char *extra = "";
7819                         /* no downgrade action needed if going down */
7820                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7821                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7822                                 extra = " (ignoring downgrade)";
7823                         }
7824                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7825                         queue_link_down = 1;
7826                         host_msg &= ~(u64)LINK_GOING_DOWN;
7827                 }
7828                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7829                         queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7830                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7831                 }
7832                 if (host_msg) {
7833                         /* report remaining messages, but do not do anything */
7834                         dd_dev_info(dd, "8051 info host message: %s\n",
7835                                     dc8051_info_host_msg_string(buf,
7836                                                                 sizeof(buf),
7837                                                                 host_msg));
7838                 }
7839
7840                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7841         }
7842         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7843                 /*
7844                  * Lost the 8051 heartbeat.  If this happens, we
7845                  * receive constant interrupts about it.  Disable
7846                  * the interrupt after the first.
7847                  */
7848                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7849                 write_csr(dd, DC_DC8051_ERR_EN,
7850                           read_csr(dd, DC_DC8051_ERR_EN) &
7851                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7852
7853                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7854         }
7855         if (reg) {
7856                 /* report the error, but do not do anything */
7857                 dd_dev_err(dd, "8051 error: %s\n",
7858                            dc8051_err_string(buf, sizeof(buf), reg));
7859         }
7860
7861         if (queue_link_down) {
7862                 /*
7863                  * If the link is already going down or disabled, do not
7864                  * queue a link down request. Likewise, if a link down
7865                  * request is already queued, do not queue another one.
7866                  */
7867                 if ((ppd->host_link_state &
7868                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7869                     ppd->link_enabled == 0) {
7870                         dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7871                                     __func__, ppd->host_link_state,
7872                                     ppd->link_enabled);
7873                 } else {
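                        /*
                         * xchg() returns the previous value: non-zero means a
                         * link down request was already queued.
                         */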
7874                         if (xchg(&ppd->is_link_down_queued, 1) == 1)
7875                                 dd_dev_info(dd,
7876                                             "%s: link down request already queued\n",
7877                                             __func__);
7878                         else
7879                                 queue_work(ppd->link_wq, &ppd->link_down_work);
7880                 }
7881         }
7882 }
7883
7884 static const char * const fm_config_txt[] = {
7885 [0] =
7886         "BadHeadDist: Distance violation between two head flits",
7887 [1] =
7888         "BadTailDist: Distance violation between two tail flits",
7889 [2] =
7890         "BadCtrlDist: Distance violation between two credit control flits",
7891 [3] =
7892         "BadCrdAck: Credits return for unsupported VL",
7893 [4] =
7894         "UnsupportedVLMarker: Received VL Marker",
7895 [5] =
7896         "BadPreempt: Exceeded the preemption nesting level",
7897 [6] =
7898         "BadControlFlit: Received unsupported control flit",
7899 /* no 7 */
7900 [8] =
7901         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7902 };
7903
7904 static const char * const port_rcv_txt[] = {
7905 [1] =
7906         "BadPktLen: Illegal PktLen",
7907 [2] =
7908         "PktLenTooLong: Packet longer than PktLen",
7909 [3] =
7910         "PktLenTooShort: Packet shorter than PktLen",
7911 [4] =
7912         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7913 [5] =
7914         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7915 [6] =
7916         "BadL2: Illegal L2 opcode",
7917 [7] =
7918         "BadSC: Unsupported SC",
7919 [9] =
7920         "BadRC: Illegal RC",
7921 [11] =
7922         "PreemptError: Preempting with same VL",
7923 [12] =
7924         "PreemptVL15: Preempting a VL15 packet",
7925 };
7926
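/*
 * Bit offsets of the FMConfig and PortRcv error groups within
 * ppd->port_error_action, used by the do_bounce checks below.
 */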
7927 #define OPA_LDR_FMCONFIG_OFFSET 16
7928 #define OPA_LDR_PORTRCV_OFFSET 0
7929 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7930 {
7931         u64 info, hdr0, hdr1;
7932         const char *extra;
7933         char buf[96];
7934         struct hfi1_pportdata *ppd = dd->pport;
7935         u8 lcl_reason = 0;
7936         int do_bounce = 0;
7937
7938         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7939                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7940                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7941                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7942                         /* set status bit */
7943                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7944                 }
7945                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7946         }
7947
7948         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7949                 struct hfi1_pportdata *ppd = dd->pport;
7950                 /* this counter saturates at (2^32) - 1 */
7951                 if (ppd->link_downed < (u32)UINT_MAX)
7952                         ppd->link_downed++;
7953                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7954         }
7955
7956         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7957                 u8 reason_valid = 1;
7958
7959                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7960                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7961                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7962                         /* set status bit */
7963                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7964                 }
7965                 switch (info) {
7966                 case 0:
7967                 case 1:
7968                 case 2:
7969                 case 3:
7970                 case 4:
7971                 case 5:
7972                 case 6:
7973                         extra = fm_config_txt[info];
7974                         break;
7975                 case 8:
7976                         extra = fm_config_txt[info];
7977                         if (ppd->port_error_action &
7978                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7979                                 do_bounce = 1;
7980                                 /*
7981                                  * lcl_reason cannot be derived from info
7982                                  * for this error
7983                                  */
7984                                 lcl_reason =
7985                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7986                         }
7987                         break;
7988                 default:
7989                         reason_valid = 0;
7990                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7991                         extra = buf;
7992                         break;
7993                 }
7994
7995                 if (reason_valid && !do_bounce) {
7996                         do_bounce = ppd->port_error_action &
7997                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7998                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7999                 }
8000
8001                 /* just report this */
8002                 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8003                                         extra);
8004                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8005         }
8006
8007         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8008                 u8 reason_valid = 1;
8009
8010                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8011                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8012                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8013                 if (!(dd->err_info_rcvport.status_and_code &
8014                       OPA_EI_STATUS_SMASK)) {
8015                         dd->err_info_rcvport.status_and_code =
8016                                 info & OPA_EI_CODE_SMASK;
8017                         /* set status bit */
8018                         dd->err_info_rcvport.status_and_code |=
8019                                 OPA_EI_STATUS_SMASK;
8020                         /*
8021                          * save first 2 flits in the packet that caused
8022                          * the error
8023                          */
8024                         dd->err_info_rcvport.packet_flit1 = hdr0;
8025                         dd->err_info_rcvport.packet_flit2 = hdr1;
8026                 }
8027                 switch (info) {
8028                 case 1:
8029                 case 2:
8030                 case 3:
8031                 case 4:
8032                 case 5:
8033                 case 6:
8034                 case 7:
8035                 case 9:
8036                 case 11:
8037                 case 12:
8038                         extra = port_rcv_txt[info];
8039                         break;
8040                 default:
8041                         reason_valid = 0;
8042                         snprintf(buf, sizeof(buf), "reserved%lld", info);
8043                         extra = buf;
8044                         break;
8045                 }
8046
8047                 if (reason_valid && !do_bounce) {
8048                         do_bounce = ppd->port_error_action &
8049                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8050                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8051                 }
8052
8053                 /* just report this */
8054                 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8055                                         "               hdr0 0x%llx, hdr1 0x%llx\n",
8056                                         extra, hdr0, hdr1);
8057
8058                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8059         }
8060
8061         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8062                 /* informative only */
8063                 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8064                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8065         }
8066         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8067                 /* informative only */
8068                 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8069                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8070         }
8071
8072         if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8073                 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8074
8075         /* report any remaining errors */
8076         if (reg)
8077                 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8078                                         dcc_err_string(buf, sizeof(buf), reg));
8079
8080         if (lcl_reason == 0)
8081                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8082
8083         if (do_bounce) {
8084                 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8085                                         __func__);
8086                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8087                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8088         }
8089 }
8090
8091 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8092 {
8093         char buf[96];
8094
8095         dd_dev_info(dd, "LCB Error: %s\n",
8096                     lcb_err_string(buf, sizeof(buf), reg));
8097 }
8098
8099 /*
8100  * CCE block DC interrupt.  Source is < 8.
8101  */
8102 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8103 {
8104         const struct err_reg_info *eri = &dc_errs[source];
8105
8106         if (eri->handler) {
8107                 interrupt_clear_down(dd, 0, eri);
8108         } else if (source == 3 /* dc_lbm_int */) {
8109                 /*
8110                  * This indicates that a parity error has occurred on the
8111                  * address/control lines presented to the LBM.  The error
8112                  * is a single pulse, there is no associated error flag,
8113                  * and it is non-maskable.  This is because if a parity
8114          * error occurs on the request, the request is dropped.
8115                  * This should never occur, but it is nice to know if it
8116                  * ever does.
8117                  */
8118                 dd_dev_err(dd, "Parity error in DC LBM block\n");
8119         } else {
8120                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8121         }
8122 }
8123
8124 /*
8125  * TX block send credit interrupt.  Source is < 160.
8126  */
8127 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8128 {
8129         sc_group_release_update(dd, source);
8130 }
8131
8132 /*
8133  * TX block SDMA interrupt.  Source is < 48.
8134  *
8135  * SDMA interrupts are grouped by type:
8136  *
8137  *       0 -  N-1 = SDma
8138  *       N - 2N-1 = SDmaProgress
8139  *      2N - 3N-1 = SDmaIdle
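 *
 *      e.g. source = N + 3 decodes to what = 1 (SDmaProgress) on engine 3,
 *      where N = TXE_NUM_SDMA_ENGINES.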
8140  */
8141 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8142 {
8143         /* what interrupt */
8144         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8145         /* which engine */
8146         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8147
8148 #ifdef CONFIG_SDMA_VERBOSITY
8149         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8150                    slashstrip(__FILE__), __LINE__, __func__);
8151         sdma_dumpstate(&dd->per_sdma[which]);
8152 #endif
8153
8154         if (likely(what < 3 && which < dd->num_sdma)) {
8155                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8156         } else {
8157                 /* should not happen */
8158                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8159         }
8160 }
8161
8162 /**
8163  * is_rcv_avail_int() - User receive context available IRQ handler
8164  * @dd: valid dd
8165  * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8166  *
8167  * RX block receive available interrupt.  Source is < 160.
8168  *
8169  * This is the general interrupt handler for user (PSM) receive contexts,
8170  * and can only be used for non-threaded IRQs.
8171  */
8172 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8173 {
8174         struct hfi1_ctxtdata *rcd;
8175         char *err_detail;
8176
8177         if (likely(source < dd->num_rcv_contexts)) {
8178                 rcd = hfi1_rcd_get_by_index(dd, source);
8179                 if (rcd) {
8180                         handle_user_interrupt(rcd);
8181                         hfi1_rcd_put(rcd);
8182                         return; /* OK */
8183                 }
8184                 /* received an interrupt, but no rcd */
8185                 err_detail = "dataless";
8186         } else {
8187                 /* received an interrupt, but are not using that context */
8188                 err_detail = "out of range";
8189         }
8190         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8191                    err_detail, source);
8192 }
8193
8194 /**
8195  * is_rcv_urgent_int() - User receive context urgent IRQ handler
8196  * @dd: valid dd
8197  * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8198  *
8199  * RX block receive urgent interrupt.  Source is < 160.
8200  *
8201  * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8202  */
8203 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8204 {
8205         struct hfi1_ctxtdata *rcd;
8206         char *err_detail;
8207
8208         if (likely(source < dd->num_rcv_contexts)) {
8209                 rcd = hfi1_rcd_get_by_index(dd, source);
8210                 if (rcd) {
8211                         handle_user_interrupt(rcd);
8212                         hfi1_rcd_put(rcd);
8213                         return; /* OK */
8214                 }
8215                 /* received an interrupt, but no rcd */
8216                 err_detail = "dataless";
8217         } else {
8218                 /* received an interrupt, but are not using that context */
8219                 err_detail = "out of range";
8220         }
8221         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8222                    err_detail, source);
8223 }
8224
8225 /*
8226  * Reserved range interrupt.  Should not be called in normal operation.
8227  */
8228 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8229 {
8230         char name[64];
8231
8232         dd_dev_err(dd, "unexpected %s interrupt\n",
8233                    is_reserved_name(name, sizeof(name), source));
8234 }
8235
8236 static const struct is_table is_table[] = {
8237 /*
8238  * start                 end
8239  *                              name func               interrupt func
8240  */
8241 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8242                                 is_misc_err_name,       is_misc_err_int },
8243 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8244                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
8245 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8246                                 is_sendctxt_err_name,   is_sendctxt_err_int },
8247 { IS_SDMA_START,             IS_SDMA_IDLE_END,
8248                                 is_sdma_eng_name,       is_sdma_eng_int },
8249 { IS_VARIOUS_START,          IS_VARIOUS_END,
8250                                 is_various_name,        is_various_int },
8251 { IS_DC_START,       IS_DC_END,
8252                                 is_dc_name,             is_dc_int },
8253 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8254                                 is_rcv_avail_name,      is_rcv_avail_int },
8255 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8256                                 is_rcv_urgent_name,     is_rcv_urgent_int },
8257 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8258                                 is_send_credit_name,    is_send_credit_int},
8259 { IS_RESERVED_START,     IS_RESERVED_END,
8260                                 is_reserved_name,       is_reserved_int},
8261 };
8262
8263 /*
8264  * Interrupt source interrupt - called when the given source has an interrupt.
8265  * Source is a bit index into an array of 64-bit integers.
8266  */
8267 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8268 {
8269         const struct is_table *entry;
8270
8271         /* avoids a double compare by walking the table in-order */
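        /* each handler receives the source relative to its range's start */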
8272         for (entry = &is_table[0]; entry->is_name; entry++) {
8273                 if (source <= entry->end) {
8274                         trace_hfi1_interrupt(dd, entry, source);
8275                         entry->is_int(dd, source - entry->start);
8276                         return;
8277                 }
8278         }
8279         /* fell off the end */
8280         dd_dev_err(dd, "invalid interrupt source %u\n", source);
8281 }
8282
8283 /**
8284  * general_interrupt() - General interrupt handler
8285  * @irq: MSIx IRQ vector
8286  * @data: hfi1 devdata
8287  *
8288  * This is able to correctly handle all non-threaded interrupts.  Receive
8289  * context DATA IRQs are threaded and are not supported by this handler.
8290  *
8291  */
8292 irqreturn_t general_interrupt(int irq, void *data)
8293 {
8294         struct hfi1_devdata *dd = data;
8295         u64 regs[CCE_NUM_INT_CSRS];
8296         u32 bit;
8297         int i;
8298         irqreturn_t handled = IRQ_NONE;
8299
8300         this_cpu_inc(*dd->int_counter);
8301
8302         /* phase 1: scan and clear all handled interrupts */
8303         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8304                 if (dd->gi_mask[i] == 0) {
8305                         regs[i] = 0;    /* used later */
8306                         continue;
8307                 }
8308                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8309                                 dd->gi_mask[i];
8310                 /* only clear if anything is set */
8311                 if (regs[i])
8312                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8313         }
8314
8315         /* phase 2: call the appropriate handler */
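        /*
         * regs[] is scanned as one contiguous bitmap, so 'bit' is the
         * absolute interrupt source number expected by is_interrupt().
         */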
8316         for_each_set_bit(bit, (unsigned long *)&regs[0],
8317                          CCE_NUM_INT_CSRS * 64) {
8318                 is_interrupt(dd, bit);
8319                 handled = IRQ_HANDLED;
8320         }
8321
8322         return handled;
8323 }
8324
8325 irqreturn_t sdma_interrupt(int irq, void *data)
8326 {
8327         struct sdma_engine *sde = data;
8328         struct hfi1_devdata *dd = sde->dd;
8329         u64 status;
8330
8331 #ifdef CONFIG_SDMA_VERBOSITY
8332         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8333                    slashstrip(__FILE__), __LINE__, __func__);
8334         sdma_dumpstate(sde);
8335 #endif
8336
8337         this_cpu_inc(*dd->int_counter);
8338
8339         /* This read_csr is really bad in the hot path */
8340         status = read_csr(dd,
8341                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8342                           & sde->imask;
8343         if (likely(status)) {
8344                 /* clear the interrupt(s) */
8345                 write_csr(dd,
8346                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8347                           status);
8348
8349                 /* handle the interrupt(s) */
8350                 sdma_engine_interrupt(sde, status);
8351         } else {
8352                 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8353                                         sde->this_idx);
8354         }
8355         return IRQ_HANDLED;
8356 }
8357
8358 /*
8359  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8360  * to ensure that the write completed.  This does NOT guarantee that
8361  * queued DMA writes to memory from the chip are pushed.
8362  */
8363 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8364 {
8365         struct hfi1_devdata *dd = rcd->dd;
8366         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8367
8368         write_csr(dd, addr, rcd->imask);
8369         /* force the above write on the chip and get a value back */
8370         (void)read_csr(dd, addr);
8371 }
8372
8373 /* force the receive interrupt */
8374 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8375 {
8376         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8377 }
8378
8379 /*
8380  * Return non-zero if a packet is present.
8381  *
8382  * This routine is called when rechecking for packets after the RcvAvail
8383  * interrupt has been cleared down.  First, do a quick check of memory for
8384  * a packet present.  If not found, use an expensive CSR read of the context
8385  * tail to determine the actual tail.  The CSR read is necessary because there
8386  * is no method to push pending DMAs to memory other than an interrupt and we
8387  * are trying to determine if we need to force an interrupt.
8388  */
8389 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8390 {
8391         u32 tail;
8392         int present;
8393
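        /*
         * Without a DMA'ed tail (no rcvhdrtail_kvaddr), a packet at the head
         * is detected by the RHF sequence number matching the expected
         * seq_cnt; otherwise compare head against the DMA'ed tail.
         */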
8394         if (!rcd->rcvhdrtail_kvaddr)
8395                 present = (rcd->seq_cnt ==
8396                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8397         else /* is RDMA rtail */
8398                 present = (rcd->head != get_rcvhdrtail(rcd));
8399
8400         if (present)
8401                 return 1;
8402
8403         /* fall back to a CSR read, correct independent of DMA_RTAIL */
8404         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8405         return rcd->head != tail;
8406 }
8407
8408 /*
8409  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8410  * This routine will try to handle packets immediately (latency), but if
8411  * it finds too many, it will invoke the thread handler (bandwidth).  The
8412  * chip receive interrupt is *not* cleared down until this or the thread (if
8413  * invoked) is finished.  The intent is to avoid extra interrupts while we
8414  * are processing packets anyway.
8415  */
8416 irqreturn_t receive_context_interrupt(int irq, void *data)
8417 {
8418         struct hfi1_ctxtdata *rcd = data;
8419         struct hfi1_devdata *dd = rcd->dd;
8420         int disposition;
8421         int present;
8422
8423         trace_hfi1_receive_interrupt(dd, rcd);
8424         this_cpu_inc(*dd->int_counter);
8425         aspm_ctx_disable(rcd);
8426
8427         /* receive interrupt remains blocked while processing packets */
8428         disposition = rcd->do_interrupt(rcd, 0);
8429
8430         /*
8431          * Too many packets were seen while processing packets in this
8432          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8433          * remains blocked.
8434          */
8435         if (disposition == RCV_PKT_LIMIT)
8436                 return IRQ_WAKE_THREAD;
8437
8438         /*
8439          * The packet processor detected no more packets.  Clear the receive
8440          * interrupt and recheck for a packet that may have arrived
8441          * after the previous check and interrupt clear.  If a packet arrived,
8442          * force another interrupt.
8443          */
8444         clear_recv_intr(rcd);
8445         present = check_packet_present(rcd);
8446         if (present)
8447                 force_recv_intr(rcd);
8448
8449         return IRQ_HANDLED;
8450 }
8451
8452 /*
8453  * Receive packet thread handler.  This expects to be invoked with the
8454  * receive interrupt still blocked.
8455  */
8456 irqreturn_t receive_context_thread(int irq, void *data)
8457 {
8458         struct hfi1_ctxtdata *rcd = data;
8459         int present;
8460
8461         /* receive interrupt is still blocked from the IRQ handler */
8462         (void)rcd->do_interrupt(rcd, 1);
8463
8464         /*
8465          * The packet processor will only return if it detected no more
8466          * packets.  Hold IRQs here so we can safely clear the interrupt and
8467          * recheck for a packet that may have arrived after the previous
8468          * check and the interrupt clear.  If a packet arrived, force another
8469          * interrupt.
8470          */
8471         local_irq_disable();
8472         clear_recv_intr(rcd);
8473         present = check_packet_present(rcd);
8474         if (present)
8475                 force_recv_intr(rcd);
8476         local_irq_enable();
8477
8478         return IRQ_HANDLED;
8479 }
8480
8481 /* ========================================================================= */
8482
8483 u32 read_physical_state(struct hfi1_devdata *dd)
8484 {
8485         u64 reg;
8486
8487         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8488         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8489                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8490 }
8491
8492 u32 read_logical_state(struct hfi1_devdata *dd)
8493 {
8494         u64 reg;
8495
8496         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8497         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8498                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8499 }
8500
8501 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8502 {
8503         u64 reg;
8504
8505         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8506         /* clear current state, set new state */
8507         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8508         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8509         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8510 }
8511
8512 /*
8513  * Use the 8051 to read an LCB CSR.
8514  */
8515 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8516 {
8517         u32 regno;
8518         int ret;
8519
8520         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8521                 if (acquire_lcb_access(dd, 0) == 0) {
8522                         *data = read_csr(dd, addr);
8523                         release_lcb_access(dd, 0);
8524                         return 0;
8525                 }
8526                 return -EBUSY;
8527         }
8528
8529         /* register is an index of LCB registers: (offset - base) / 8 */
8530         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8531         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8532         if (ret != HCMD_SUCCESS)
8533                 return -EBUSY;
8534         return 0;
8535 }
8536
8537 /*
8538  * Provide a cache for some of the LCB registers in case the LCB is
8539  * unavailable.
8540  * (The LCB is unavailable in certain link states, for example.)
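 * update_lcb_cache() refreshes these values when read_lcb_csr() succeeds;
 * read_lcb_cache() returns the cached values when the LCB cannot be read.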
8541  */
8542 struct lcb_datum {
8543         u32 off;
8544         u64 val;
8545 };
8546
8547 static struct lcb_datum lcb_cache[] = {
8548         { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8549         { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8550         { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8551 };
8552
8553 static void update_lcb_cache(struct hfi1_devdata *dd)
8554 {
8555         int i;
8556         int ret;
8557         u64 val;
8558
8559         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8560                 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8561
8562                 /* Update if we get good data */
8563                 if (likely(ret != -EBUSY))
8564                         lcb_cache[i].val = val;
8565         }
8566 }
8567
8568 static int read_lcb_cache(u32 off, u64 *val)
8569 {
8570         int i;
8571
8572         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8573                 if (lcb_cache[i].off == off) {
8574                         *val = lcb_cache[i].val;
8575                         return 0;
8576                 }
8577         }
8578
8579         pr_warn("%s bad offset 0x%x\n", __func__, off);
8580         return -1;
8581 }
8582
8583 /*
8584  * Read an LCB CSR.  Access may not be in host control, so check.
8585  * Return 0 on success, -EBUSY on failure.
8586  */
8587 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8588 {
8589         struct hfi1_pportdata *ppd = dd->pport;
8590
8591         /* if up, go through the 8051 for the value */
8592         if (ppd->host_link_state & HLS_UP)
8593                 return read_lcb_via_8051(dd, addr, data);
8594         /* if going up or down, check the cache, otherwise, no access */
8595         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8596                 if (read_lcb_cache(addr, data))
8597                         return -EBUSY;
8598                 return 0;
8599         }
8600
8601         /* otherwise, host has access */
8602         *data = read_csr(dd, addr);
8603         return 0;
8604 }
8605
8606 /*
8607  * Use the 8051 to write an LCB CSR.
8608  */
8609 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8610 {
8611         u32 regno;
8612         int ret;
8613
8614         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8615             (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8616                 if (acquire_lcb_access(dd, 0) == 0) {
8617                         write_csr(dd, addr, data);
8618                         release_lcb_access(dd, 0);
8619                         return 0;
8620                 }
8621                 return -EBUSY;
8622         }
8623
8624         /* register is an index of LCB registers: (offset - base) / 8 */
8625         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8626         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8627         if (ret != HCMD_SUCCESS)
8628                 return -EBUSY;
8629         return 0;
8630 }
8631
8632 /*
8633  * Write an LCB CSR.  Access may not be in host control, so check.
8634  * Return 0 on success, -EBUSY on failure.
8635  */
8636 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8637 {
8638         struct hfi1_pportdata *ppd = dd->pport;
8639
8640         /* if up, go through the 8051 for the value */
8641         if (ppd->host_link_state & HLS_UP)
8642                 return write_lcb_via_8051(dd, addr, data);
8643         /* if going up or down, no access */
8644         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8645                 return -EBUSY;
8646         /* otherwise, host has access */
8647         write_csr(dd, addr, data);
8648         return 0;
8649 }
8650
8651 /*
8652  * Returns:
8653  *      < 0 = Linux error, not able to get access
8654  *      > 0 = 8051 command RETURN_CODE
8655  */
8656 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8657                            u64 *out_data)
8658 {
8659         u64 reg, completed;
8660         int return_code;
8661         unsigned long timeout;
8662
8663         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8664
8665         mutex_lock(&dd->dc8051_lock);
8666
8667         /* We can't send any commands to the 8051 if it's in reset */
8668         if (dd->dc_shutdown) {
8669                 return_code = -ENODEV;
8670                 goto fail;
8671         }
8672
8673         /*
8674          * If an 8051 host command timed out previously, then the 8051 is
8675          * stuck.
8676          *
8677          * On first timeout, attempt to reset and restart the entire DC
8678          * block (including 8051). (Is this too big of a hammer?)
8679          *
8680          * If the 8051 times out a second time, the reset did not bring it
8681          * back to healthy life. In that case, fail any subsequent commands.
8682          */
8683         if (dd->dc8051_timed_out) {
8684                 if (dd->dc8051_timed_out > 1) {
8685                         dd_dev_err(dd,
8686                                    "Previous 8051 host command timed out, skipping command %u\n",
8687                                    type);
8688                         return_code = -ENXIO;
8689                         goto fail;
8690                 }
8691                 _dc_shutdown(dd);
8692                 _dc_start(dd);
8693         }
8694
8695         /*
8696          * If there is no timeout, then the 8051 command interface is
8697          * waiting for a command.
8698          */
8699
8700         /*
8701          * When writing an LCB CSR, out_data contains the full value
8702          * to be written, while in_data contains the relative LCB
8703          * address in 7:0.  Do the work here, rather than the caller,
8704          * of distributing the write data to where it needs to go:
8705          *
8706          * Write data
8707          *   39:00 -> in_data[47:8]
8708          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8709          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
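         *
         * e.g. (illustrative) *out_data = 0x1122334455667788 distributes as:
         *   0x4455667788 -> in_data[47:8]
         *   0x33         -> RETURN_CODE
         *   0x1122       -> RSP_DATA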
8710          */
8711         if (type == HCMD_WRITE_LCB_CSR) {
8712                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8713                 /* must preserve COMPLETED - it is tied to hardware */
8714                 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8715                 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8716                 reg |= ((((*out_data) >> 40) & 0xff) <<
8717                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8718                       | ((((*out_data) >> 48) & 0xffff) <<
8719                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8720                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8721         }
8722
8723         /*
8724          * Do two writes: the first to stabilize the type and req_data, the
8725          * second to activate.
8726          */
8727         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8728                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8729                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8730                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8731         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8732         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8733         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8734
8735         /* wait for completion, alternate: interrupt */
8736         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8737         while (1) {
8738                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8739                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8740                 if (completed)
8741                         break;
8742                 if (time_after(jiffies, timeout)) {
8743                         dd->dc8051_timed_out++;
8744                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8745                         if (out_data)
8746                                 *out_data = 0;
8747                         return_code = -ETIMEDOUT;
8748                         goto fail;
8749                 }
8750                 udelay(2);
8751         }
8752
8753         if (out_data) {
8754                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8755                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8756                 if (type == HCMD_READ_LCB_CSR) {
8757                         /* top 16 bits are in a different register */
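                        /*
                         * The shift repositions the EXT_DEV_1 REQ_DATA field
                         * so it lands in bits 63:48 of *out_data.
                         */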
8758                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8759                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8760                                 << (48
8761                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8762                 }
8763         }
8764         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8765                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8766         dd->dc8051_timed_out = 0;
8767         /*
8768          * Clear command for next user.
8769          */
8770         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8771
8772 fail:
8773         mutex_unlock(&dd->dc8051_lock);
8774         return return_code;
8775 }
8776
8777 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8778 {
8779         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8780 }
8781
8782 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8783                      u8 lane_id, u32 config_data)
8784 {
8785         u64 data;
8786         int ret;
8787
8788         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8789                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8790                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8791         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8792         if (ret != HCMD_SUCCESS) {
8793                 dd_dev_err(dd,
8794                            "load 8051 config: field id %d, lane %d, err %d\n",
8795                            (int)field_id, (int)lane_id, ret);
8796         }
8797         return ret;
8798 }
8799
8800 /*
8801  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8802  * set the result, even on error.
8803  * Return 0 on success, -errno on failure
8804  */
8805 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8806                      u32 *result)
8807 {
8808         u64 big_data;
8809         u32 addr;
8810         int ret;
8811
8812         /* address start depends on the lane_id */
8813         if (lane_id < 4)
8814                 addr = (4 * NUM_GENERAL_FIELDS)
8815                         + (lane_id * 4 * NUM_LANE_FIELDS);
8816         else
8817                 addr = 0;
8818         addr += field_id * 4;
8819
8820         /* read is in 8-byte chunks, hardware will truncate the address down */
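        /* bit 2 of addr then selects the upper or lower 32-bit word below */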
8821         ret = read_8051_data(dd, addr, 8, &big_data);
8822
8823         if (ret == 0) {
8824                 /* extract the 4 bytes we want */
8825                 if (addr & 0x4)
8826                         *result = (u32)(big_data >> 32);
8827                 else
8828                         *result = (u32)big_data;
8829         } else {
8830                 *result = 0;
8831                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8832                            __func__, lane_id, field_id);
8833         }
8834
8835         return ret;
8836 }
8837
8838 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8839                               u8 continuous)
8840 {
8841         u32 frame;
8842
8843         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8844                 | power_management << POWER_MANAGEMENT_SHIFT;
8845         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8846                                 GENERAL_CONFIG, frame);
8847 }
8848
8849 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8850                                  u16 vl15buf, u8 crc_sizes)
8851 {
8852         u32 frame;
8853
8854         frame = (u32)vau << VAU_SHIFT
8855                 | (u32)z << Z_SHIFT
8856                 | (u32)vcu << VCU_SHIFT
8857                 | (u32)vl15buf << VL15BUF_SHIFT
8858                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8859         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8860                                 GENERAL_CONFIG, frame);
8861 }
8862
8863 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8864                                     u8 *flag_bits, u16 *link_widths)
8865 {
8866         u32 frame;
8867
8868         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8869                          &frame);
8870         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8871         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8872         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8873 }
8874
8875 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8876                                     u8 misc_bits,
8877                                     u8 flag_bits,
8878                                     u16 link_widths)
8879 {
8880         u32 frame;
8881
8882         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8883                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8884                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8885         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8886                      frame);
8887 }
8888
8889 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8890                                  u8 device_rev)
8891 {
8892         u32 frame;
8893
8894         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8895                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8896         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8897 }
8898
8899 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8900                                   u8 *device_rev)
8901 {
8902         u32 frame;
8903
8904         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8905         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8906         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8907                         & REMOTE_DEVICE_REV_MASK;
8908 }
8909
8910 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8911 {
8912         u32 frame;
8913         u32 mask;
8914
8915         mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8916         read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8917         /* Clear, then set field */
8918         frame &= ~mask;
8919         frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8920         return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8921                                 frame);
8922 }
8923
8924 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8925                       u8 *ver_patch)
8926 {
8927         u32 frame;
8928
8929         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8930         *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8931                 STS_FM_VERSION_MAJOR_MASK;
8932         *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8933                 STS_FM_VERSION_MINOR_MASK;
8934
8935         read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8936         *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8937                 STS_FM_VERSION_PATCH_MASK;
8938 }
8939
8940 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8941                                u8 *continuous)
8942 {
8943         u32 frame;
8944
8945         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8946         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8947                                         & POWER_MANAGEMENT_MASK;
8948         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8949                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8950 }
8951
8952 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8953                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8954 {
8955         u32 frame;
8956
8957         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8958         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8959         *z = (frame >> Z_SHIFT) & Z_MASK;
8960         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8961         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8962         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8963 }
8964
8965 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8966                                       u8 *remote_tx_rate,
8967                                       u16 *link_widths)
8968 {
8969         u32 frame;
8970
8971         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8972                          &frame);
8973         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8974                                 & REMOTE_TX_RATE_MASK;
8975         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8976 }
8977
8978 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8979 {
8980         u32 frame;
8981
8982         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8983         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8984 }
8985
8986 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8987 {
8988         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8989 }
8990
8991 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8992 {
8993         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8994 }
8995
8996 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8997 {
8998         u32 frame;
8999         int ret;
9000
9001         *link_quality = 0;
9002         if (dd->pport->host_link_state & HLS_UP) {
9003                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9004                                        &frame);
9005                 if (ret == 0)
9006                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
9007                                                 & LINK_QUALITY_MASK;
9008         }
9009 }
9010
9011 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9012 {
9013         u32 frame;
9014
9015         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9016         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9017 }
9018
9019 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9020 {
9021         u32 frame;
9022
9023         read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9024         *ldr = (frame & 0xff);
9025 }
9026
9027 static int read_tx_settings(struct hfi1_devdata *dd,
9028                             u8 *enable_lane_tx,
9029                             u8 *tx_polarity_inversion,
9030                             u8 *rx_polarity_inversion,
9031                             u8 *max_rate)
9032 {
9033         u32 frame;
9034         int ret;
9035
9036         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9037         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9038                                 & ENABLE_LANE_TX_MASK;
9039         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9040                                 & TX_POLARITY_INVERSION_MASK;
9041         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9042                                 & RX_POLARITY_INVERSION_MASK;
9043         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9044         return ret;
9045 }
9046
9047 static int write_tx_settings(struct hfi1_devdata *dd,
9048                              u8 enable_lane_tx,
9049                              u8 tx_polarity_inversion,
9050                              u8 rx_polarity_inversion,
9051                              u8 max_rate)
9052 {
9053         u32 frame;
9054
9055         /* no need to mask, all variable sizes match field widths */
9056         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9057                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9058                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9059                 | max_rate << MAX_RATE_SHIFT;
9060         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9061 }
9062
9063 /*
9064  * Read an idle LCB message.
9065  *
9066  * Returns 0 on success, -EINVAL on error
9067  */
9068 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9069 {
9070         int ret;
9071
9072         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9073         if (ret != HCMD_SUCCESS) {
9074                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9075                            (u32)type, ret);
9076                 return -EINVAL;
9077         }
9078         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9079         /* return only the payload as we already know the type */
9080         *data_out >>= IDLE_PAYLOAD_SHIFT;
9081         return 0;
9082 }
9083
9084 /*
9085  * Read an idle SMA message.  To be done in response to a notification from
9086  * the 8051.
9087  *
9088  * Returns 0 on success, -EINVAL on error
9089  */
9090 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9091 {
9092         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9093                                  data);
9094 }
9095
9096 /*
9097  * Send an idle LCB message.
9098  *
9099  * Returns 0 on success, -EINVAL on error
9100  */
9101 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9102 {
9103         int ret;
9104
9105         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9106         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9107         if (ret != HCMD_SUCCESS) {
9108                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9109                            data, ret);
9110                 return -EINVAL;
9111         }
9112         return 0;
9113 }
9114
9115 /*
9116  * Send an idle SMA message.
9117  *
9118  * Returns 0 on success, -EINVAL on error
9119  */
9120 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9121 {
9122         u64 data;
9123
9124         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9125                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9126         return send_idle_message(dd, data);
9127 }
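/*
 * Illustrative note (not from the original source): the idle SMA frame
 * built above carries the SMA payload in the IDLE_PAYLOAD field and
 * IDLE_SMA in the message-type field.  For example, a call such as
 * send_idle_sma(dd, SMA_IDLE_ACTIVE) (using the SMA payload constants
 * defined elsewhere in this driver) produces a single LCB idle frame; on
 * the peer, read_idle_sma()/read_idle_message() strip the type and hand
 * back only the payload.
 */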
9128
9129 /*
9130  * Initialize the LCB then do a quick link up.  This may or may not be
9131  * in loopback.
9132  *
9133  * return 0 on success, -errno on error
9134  */
9135 static int do_quick_linkup(struct hfi1_devdata *dd)
9136 {
9137         int ret;
9138
9139         lcb_shutdown(dd, 0);
9140
9141         if (loopback) {
9142                 /* LCB_CFG_LOOPBACK.VAL = 2 */
9143                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9144                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9145                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9146                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9147         }
9148
9149         /* start the LCBs */
9150         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9151         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9152
9153         /* simulator-only loopback steps */
9154         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9155                 /* LCB_CFG_RUN.EN = 1 */
9156                 write_csr(dd, DC_LCB_CFG_RUN,
9157                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9158
9159                 ret = wait_link_transfer_active(dd, 10);
9160                 if (ret)
9161                         return ret;
9162
9163                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9164                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9165         }
9166
9167         if (!loopback) {
9168                 /*
9169                  * When doing quick linkup and not in loopback, both
9170                  * sides must be done with LCB set-up before either
9171                  * starts the quick linkup.  Put a delay here so that
9172                  * both sides can be started and have a chance to be
9173                  * done with LCB set up before resuming.
9174                  */
9175                 dd_dev_err(dd,
9176                            "Pausing for peer to be finished with LCB set up\n");
9177                 msleep(5000);
9178                 dd_dev_err(dd, "Continuing with quick linkup\n");
9179         }
9180
9181         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9182         set_8051_lcb_access(dd);
9183
9184         /*
9185          * State "quick" LinkUp request sets the physical link state to
9186          * LinkUp without a verify capability sequence.
9187          * This state is in simulator v37 and later.
9188          */
9189         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9190         if (ret != HCMD_SUCCESS) {
9191                 dd_dev_err(dd,
9192                            "%s: set physical link state to quick LinkUp failed with return %d\n",
9193                            __func__, ret);
9194
9195                 set_host_lcb_access(dd);
9196                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9197
9198                 if (ret >= 0)
9199                         ret = -EINVAL;
9200                 return ret;
9201         }
9202
9203         return 0; /* success */
9204 }
9205
9206 /*
9207  * Do all special steps to set up loopback.
9208  */
9209 static int init_loopback(struct hfi1_devdata *dd)
9210 {
9211         dd_dev_info(dd, "Entering loopback mode\n");
9212
9213         /* all loopbacks should disable self GUID check */
9214         write_csr(dd, DC_DC8051_CFG_MODE,
9215                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9216
9217         /*
9218          * The simulator has only one loopback option - LCB.  Switch
9219          * to that option, which includes quick link up.
9220          *
9221          * Accept all valid loopback values.
9222          */
9223         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9224             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9225              loopback == LOOPBACK_CABLE)) {
9226                 loopback = LOOPBACK_LCB;
9227                 quick_linkup = 1;
9228                 return 0;
9229         }
9230
9231         /*
9232          * SerDes loopback init sequence is handled in set_local_link_attributes
9233          */
9234         if (loopback == LOOPBACK_SERDES)
9235                 return 0;
9236
9237         /* LCB loopback - handled at poll time */
9238         if (loopback == LOOPBACK_LCB) {
9239                 quick_linkup = 1; /* LCB is always quick linkup */
9240
9241                 /* not supported in emulation due to emulation RTL changes */
9242                 if (dd->icode == ICODE_FPGA_EMULATION) {
9243                         dd_dev_err(dd,
9244                                    "LCB loopback not supported in emulation\n");
9245                         return -EINVAL;
9246                 }
9247                 return 0;
9248         }
9249
9250         /* external cable loopback requires no extra steps */
9251         if (loopback == LOOPBACK_CABLE)
9252                 return 0;
9253
9254         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9255         return -EINVAL;
9256 }
9257
9258 /*
9259  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9260  * used in the Verify Capability link width attribute.
9261  */
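/*
 * For illustration: if the FM enables widths 1X and 4X, the translation
 * table below sets bits 0 and 3 of the result (0b1001); OPA width bits
 * without a table entry contribute nothing to the VC value.
 */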
9262 static u16 opa_to_vc_link_widths(u16 opa_widths)
9263 {
9264         int i;
9265         u16 result = 0;
9266
9267         static const struct link_bits {
9268                 u16 from;
9269                 u16 to;
9270         } opa_link_xlate[] = {
9271                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9272                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9273                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9274                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9275         };
9276
9277         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9278                 if (opa_widths & opa_link_xlate[i].from)
9279                         result |= opa_link_xlate[i].to;
9280         }
9281         return result;
9282 }
9283
9284 /*
9285  * Set link attributes before moving to polling.
9286  */
9287 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9288 {
9289         struct hfi1_devdata *dd = ppd->dd;
9290         u8 enable_lane_tx;
9291         u8 tx_polarity_inversion;
9292         u8 rx_polarity_inversion;
9293         int ret;
9294         u32 misc_bits = 0;
9295         /* reset our fabric serdes to clear any lingering problems */
9296         fabric_serdes_reset(dd);
9297
9298         /* set the local tx rate - need to read-modify-write */
9299         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9300                                &rx_polarity_inversion, &ppd->local_tx_rate);
9301         if (ret)
9302                 goto set_local_link_attributes_fail;
9303
9304         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9305                 /* set the tx rate to the fastest enabled */
9306                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9307                         ppd->local_tx_rate = 1;
9308                 else
9309                         ppd->local_tx_rate = 0;
9310         } else {
9311                 /* set the tx rate to all enabled */
9312                 ppd->local_tx_rate = 0;
9313                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9314                         ppd->local_tx_rate |= 2;
9315                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9316                         ppd->local_tx_rate |= 1;
9317         }
9318
9319         enable_lane_tx = 0xF; /* enable all four lanes */
9320         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9321                                 rx_polarity_inversion, ppd->local_tx_rate);
9322         if (ret != HCMD_SUCCESS)
9323                 goto set_local_link_attributes_fail;
9324
9325         ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9326         if (ret != HCMD_SUCCESS) {
9327                 dd_dev_err(dd,
9328                            "Failed to set host interface version, return 0x%x\n",
9329                            ret);
9330                 goto set_local_link_attributes_fail;
9331         }
9332
9333         /*
9334          * DC supports continuous updates.
9335          */
9336         ret = write_vc_local_phy(dd,
9337                                  0 /* no power management */,
9338                                  1 /* continuous updates */);
9339         if (ret != HCMD_SUCCESS)
9340                 goto set_local_link_attributes_fail;
9341
9342         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9343         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9344                                     ppd->port_crc_mode_enabled);
9345         if (ret != HCMD_SUCCESS)
9346                 goto set_local_link_attributes_fail;
9347
9348         /*
9349          * SerDes loopback init sequence requires
9350          * setting bit 0 of MISC_CONFIG_BITS
9351          */
9352         if (loopback == LOOPBACK_SERDES)
9353                 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9354
9355         /*
9356          * An external device configuration request is used to reset the LCB
9357          * to retry obtaining operational lanes when the first attempt is
9358          * unsuccessful.
9359          */
9360         if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9361                 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9362
9363         ret = write_vc_local_link_mode(dd, misc_bits, 0,
9364                                        opa_to_vc_link_widths(
9365                                                 ppd->link_width_enabled));
9366         if (ret != HCMD_SUCCESS)
9367                 goto set_local_link_attributes_fail;
9368
9369         /* let peer know who we are */
9370         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9371         if (ret == HCMD_SUCCESS)
9372                 return 0;
9373
9374 set_local_link_attributes_fail:
9375         dd_dev_err(dd,
9376                    "Failed to set local link attributes, return 0x%x\n",
9377                    ret);
9378         return ret;
9379 }
9380
9381 /*
9382  * Call this to start the link.
9383  * Do not do anything if the link is disabled.
9384  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9385  */
9386 int start_link(struct hfi1_pportdata *ppd)
9387 {
9388         /*
9389          * Tune the SerDes to a ballpark setting for optimal signal and bit
9390          * error rate.  Needs to be done before starting the link.
9391          */
9392         tune_serdes(ppd);
9393
9394         if (!ppd->driver_link_ready) {
9395                 dd_dev_info(ppd->dd,
9396                             "%s: stopping link start because driver is not ready\n",
9397                             __func__);
9398                 return 0;
9399         }
9400
9401         /*
9402          * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9403          * pkey table can be configured properly if the HFI unit is connected
9404          * to switch port with MgmtAllowed=NO
9405          */
9406         clear_full_mgmt_pkey(ppd);
9407
9408         return set_link_state(ppd, HLS_DN_POLL);
9409 }
9410
9411 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9412 {
9413         struct hfi1_devdata *dd = ppd->dd;
9414         u64 mask;
9415         unsigned long timeout;
9416
9417         /*
9418          * Some QSFP cables have a quirk that asserts the IntN line as a side
9419          * effect of power up on plug-in. We ignore this false positive
9420          * interrupt until the module has finished powering up by waiting for
9421          * a minimum timeout of the module inrush initialization time of
9422          * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9423          * module have stabilized.
9424          */
9425         msleep(500);
9426
9427         /*
9428          * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9429          */
9430         timeout = jiffies + msecs_to_jiffies(2000);
9431         while (1) {
9432                 mask = read_csr(dd, dd->hfi1_id ?
9433                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9434                 if (!(mask & QSFP_HFI0_INT_N))
9435                         break;
9436                 if (time_after(jiffies, timeout)) {
9437                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9438                                     __func__);
9439                         break;
9440                 }
9441                 udelay(2);
9442         }
9443 }
9444
9445 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9446 {
9447         struct hfi1_devdata *dd = ppd->dd;
9448         u64 mask;
9449
9450         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9451         if (enable) {
9452                 /*
9453                  * Clear the status register to avoid an immediate interrupt
9454                  * when we re-enable the IntN pin
9455                  */
9456                 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9457                           QSFP_HFI0_INT_N);
9458                 mask |= (u64)QSFP_HFI0_INT_N;
9459         } else {
9460                 mask &= ~(u64)QSFP_HFI0_INT_N;
9461         }
9462         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9463 }
9464
9465 int reset_qsfp(struct hfi1_pportdata *ppd)
9466 {
9467         struct hfi1_devdata *dd = ppd->dd;
9468         u64 mask, qsfp_mask;
9469
9470         /* Disable INT_N from triggering QSFP interrupts */
9471         set_qsfp_int_n(ppd, 0);
9472
9473         /* Reset the QSFP */
9474         mask = (u64)QSFP_HFI0_RESET_N;
9475
9476         qsfp_mask = read_csr(dd,
9477                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9478         qsfp_mask &= ~mask;
9479         write_csr(dd,
9480                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9481
9482         udelay(10);
9483
9484         qsfp_mask |= mask;
9485         write_csr(dd,
9486                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9487
9488         wait_for_qsfp_init(ppd);
9489
9490         /*
9491          * Allow INT_N to trigger the QSFP interrupt to watch
9492          * for alarms and warnings
9493          */
9494         set_qsfp_int_n(ppd, 1);
9495
9496         /*
9497          * After the reset, AOC transmitters are enabled by default. They need
9498          * to be turned off to complete the QSFP setup before they can be
9499          * enabled again.
9500          */
9501         return set_qsfp_tx(ppd, 0);
9502 }
9503
9504 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9505                                         u8 *qsfp_interrupt_status)
9506 {
9507         struct hfi1_devdata *dd = ppd->dd;
9508
9509         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9510             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9511                 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9512                            __func__);
9513
9514         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9515             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9516                 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9517                            __func__);
9518
9519         /*
9520          * The remaining alarms/warnings don't matter if the link is down.
9521          */
9522         if (ppd->host_link_state & HLS_DOWN)
9523                 return 0;
9524
9525         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9526             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9527                 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9528                            __func__);
9529
9530         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9531             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9532                 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9533                            __func__);
9534
9535         /* Byte 2 is vendor specific */
9536
9537         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9538             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9539                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9540                            __func__);
9541
9542         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9543             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9544                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9545                            __func__);
9546
9547         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9548             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9549                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9550                            __func__);
9551
9552         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9553             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9554                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9555                            __func__);
9556
9557         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9558             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9559                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9560                            __func__);
9561
9562         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9563             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9564                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9565                            __func__);
9566
9567         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9568             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9569                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9570                            __func__);
9571
9572         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9573             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9574                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9575                            __func__);
9576
9577         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9578             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9579                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9580                            __func__);
9581
9582         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9583             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9584                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9585                            __func__);
9586
9587         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9588             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9589                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9590                            __func__);
9591
9592         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9593             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9594                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9595                            __func__);
9596
9597         /* Bytes 9-10 and 11-12 are reserved */
9598         /* Bytes 13-15 are vendor specific */
9599
9600         return 0;
9601 }
9602
9603 /* This routine is only scheduled when the QSFP module-present signal is asserted */
9604 void qsfp_event(struct work_struct *work)
9605 {
9606         struct qsfp_data *qd;
9607         struct hfi1_pportdata *ppd;
9608         struct hfi1_devdata *dd;
9609
9610         qd = container_of(work, struct qsfp_data, qsfp_work);
9611         ppd = qd->ppd;
9612         dd = ppd->dd;
9613
9614         /* Sanity check */
9615         if (!qsfp_mod_present(ppd))
9616                 return;
9617
9618         if (ppd->host_link_state == HLS_DN_DISABLE) {
9619                 dd_dev_info(ppd->dd,
9620                             "%s: stopping link start because link is disabled\n",
9621                             __func__);
9622                 return;
9623         }
9624
9625         /*
9626          * Turn DC back on after cable has been re-inserted. Up until
9627          * now, the DC has been in reset to save power.
9628          */
9629         dc_start(dd);
9630
9631         if (qd->cache_refresh_required) {
9632                 set_qsfp_int_n(ppd, 0);
9633
9634                 wait_for_qsfp_init(ppd);
9635
9636                 /*
9637                  * Allow INT_N to trigger the QSFP interrupt to watch
9638                  * for alarms and warnings
9639                  */
9640                 set_qsfp_int_n(ppd, 1);
9641
9642                 start_link(ppd);
9643         }
9644
9645         if (qd->check_interrupt_flags) {
9646                 u8 qsfp_interrupt_status[16] = {0,};
9647
9648                 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9649                                   &qsfp_interrupt_status[0], 16) != 16) {
9650                         dd_dev_info(dd,
9651                                     "%s: Failed to read status of QSFP module\n",
9652                                     __func__);
9653                 } else {
9654                         unsigned long flags;
9655
9656                         handle_qsfp_error_conditions(
9657                                         ppd, qsfp_interrupt_status);
9658                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9659                         ppd->qsfp_info.check_interrupt_flags = 0;
9660                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9661                                                flags);
9662                 }
9663         }
9664 }
9665
9666 void init_qsfp_int(struct hfi1_devdata *dd)
9667 {
9668         struct hfi1_pportdata *ppd = dd->pport;
9669         u64 qsfp_mask;
9670
9671         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9672         /* Clear current status to avoid spurious interrupts */
9673         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9674                   qsfp_mask);
9675         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9676                   qsfp_mask);
9677
9678         set_qsfp_int_n(ppd, 0);
9679
9680         /* Handle active low nature of INT_N and MODPRST_N pins */
9681         if (qsfp_mod_present(ppd))
9682                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9683         write_csr(dd,
9684                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9685                   qsfp_mask);
9686
9687         /* Enable the appropriate QSFP IRQ source */
9688         if (!dd->hfi1_id)
9689                 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9690         else
9691                 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9692 }
9693
9694 /*
9695  * Do a one-time initialize of the LCB block.
9696  */
9697 static void init_lcb(struct hfi1_devdata *dd)
9698 {
9699         /* simulator does not correctly handle LCB cclk loopback, skip */
9700         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9701                 return;
9702
9703         /* the DC has been reset earlier in the driver load */
9704
9705         /* set LCB for cclk loopback on the port */
9706         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9707         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9708         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9709         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9710         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9711         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9712         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9713 }
9714
9715 /*
9716  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9717  * on error.
9718  */
9719 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9720 {
9721         int ret;
9722         u8 status;
9723
9724         /*
9725          * Report success if this is not a QSFP port or, if it is, the cable
9726          * is not present
9727          */
9728         if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9729                 return 0;
9730
9731         /* read byte 2, the status byte */
9732         ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9733         if (ret < 0)
9734                 return ret;
9735         if (ret != 1)
9736                 return -EIO;
9737
9738         return 0; /* success */
9739 }
9740
9741 /*
9742  * Values for QSFP retry.
9743  *
9744  * Give up after 10s (20 x 500ms).  The overall timeout was determined
9745  * empirically on a large cluster.
9746  */
9747 #define MAX_QSFP_RETRIES 20
9748 #define QSFP_RETRY_WAIT 500 /* msec */
9749
9750 /*
9751  * Try a QSFP read.  If it fails, schedule a retry for later.
9752  * Called on first link activation after driver load.
9753  */
9754 static void try_start_link(struct hfi1_pportdata *ppd)
9755 {
9756         if (test_qsfp_read(ppd)) {
9757                 /* read failed */
9758                 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9759                         dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9760                         return;
9761                 }
9762                 dd_dev_info(ppd->dd,
9763                             "QSFP not responding, waiting and retrying %d\n",
9764                             (int)ppd->qsfp_retry_count);
9765                 ppd->qsfp_retry_count++;
9766                 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9767                                    msecs_to_jiffies(QSFP_RETRY_WAIT));
9768                 return;
9769         }
9770         ppd->qsfp_retry_count = 0;
9771
9772         start_link(ppd);
9773 }
9774
9775 /*
9776  * Workqueue function to start the link after a delay.
9777  */
9778 void handle_start_link(struct work_struct *work)
9779 {
9780         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9781                                                   start_link_work.work);
9782         try_start_link(ppd);
9783 }
9784
9785 int bringup_serdes(struct hfi1_pportdata *ppd)
9786 {
9787         struct hfi1_devdata *dd = ppd->dd;
9788         u64 guid;
9789         int ret;
9790
9791         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9792                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9793
9794         guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9795         if (!guid) {
9796                 if (dd->base_guid)
9797                         guid = dd->base_guid + ppd->port - 1;
9798                 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9799         }
9800
9801         /* Set linkinit_reason on power up per OPA spec */
9802         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9803
9804         /* one-time init of the LCB */
9805         init_lcb(dd);
9806
9807         if (loopback) {
9808                 ret = init_loopback(dd);
9809                 if (ret < 0)
9810                         return ret;
9811         }
9812
9813         get_port_type(ppd);
9814         if (ppd->port_type == PORT_TYPE_QSFP) {
9815                 set_qsfp_int_n(ppd, 0);
9816                 wait_for_qsfp_init(ppd);
9817                 set_qsfp_int_n(ppd, 1);
9818         }
9819
9820         try_start_link(ppd);
9821         return 0;
9822 }
9823
9824 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9825 {
9826         struct hfi1_devdata *dd = ppd->dd;
9827
9828         /*
9829          * Shut down the link and keep it down.  First clear the flag that
9830          * says the driver wants to allow the link to be up (driver_link_ready).
9831          * Then make sure the link is not automatically restarted
9832          * (link_enabled).  Cancel any pending restart.  And finally
9833          * go offline.
9834          */
9835         ppd->driver_link_ready = 0;
9836         ppd->link_enabled = 0;
9837
9838         ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9839         flush_delayed_work(&ppd->start_link_work);
9840         cancel_delayed_work_sync(&ppd->start_link_work);
9841
9842         ppd->offline_disabled_reason =
9843                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9844         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9845                              OPA_LINKDOWN_REASON_REBOOT);
9846         set_link_state(ppd, HLS_DN_OFFLINE);
9847
9848         /* disable the port */
9849         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9850 }
9851
9852 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9853 {
9854         struct hfi1_pportdata *ppd;
9855         int i;
9856
9857         ppd = (struct hfi1_pportdata *)(dd + 1);
9858         for (i = 0; i < dd->num_pports; i++, ppd++) {
9859                 ppd->ibport_data.rvp.rc_acks = NULL;
9860                 ppd->ibport_data.rvp.rc_qacks = NULL;
9861                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9862                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9863                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9864                 if (!ppd->ibport_data.rvp.rc_acks ||
9865                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9866                     !ppd->ibport_data.rvp.rc_qacks)
9867                         return -ENOMEM;
9868         }
9869
9870         return 0;
9871 }
9872
9873 /*
9874  * index is the index into the receive array
9875  */
9876 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9877                   u32 type, unsigned long pa, u16 order)
9878 {
9879         u64 reg;
9880
9881         if (!(dd->flags & HFI1_PRESENT))
9882                 goto done;
9883
9884         if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9885                 pa = 0;
9886                 order = 0;
9887         } else if (type > PT_INVALID) {
9888                 dd_dev_err(dd,
9889                            "unexpected receive array type %u for index %u, not handled\n",
9890                            type, index);
9891                 goto done;
9892         }
9893         trace_hfi1_put_tid(dd, index, type, pa, order);
9894
9895 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
9896         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9897                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9898                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9899                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9900         trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9901         writeq(reg, dd->rcvarray_wc + (index * 8));
9902
9903         if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9904                 /*
9905                  * Eager entries are written and flushed
9906                  *
9907                  * Expected entries are flushed every 4 writes
9908                  */
9909                 flush_wc();
9910 done:
9911         return;
9912 }
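/*
 * Illustrative note: the RcvArray is mapped write-combining (hence
 * rcvarray_wc), so expected (TID) entries are only flushed when
 * (index & 3) == 3.  Programming expected entries 0..7 therefore flushes
 * after entries 3 and 7, while every eager entry is flushed as it is
 * written.
 */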
9913
9914 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9915 {
9916         struct hfi1_devdata *dd = rcd->dd;
9917         u32 i;
9918
9919         /* this could be optimized */
9920         for (i = rcd->eager_base; i < rcd->eager_base +
9921                      rcd->egrbufs.alloced; i++)
9922                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9923
9924         for (i = rcd->expected_base;
9925                         i < rcd->expected_base + rcd->expected_count; i++)
9926                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9927 }
9928
9929 static const char * const ib_cfg_name_strings[] = {
9930         "HFI1_IB_CFG_LIDLMC",
9931         "HFI1_IB_CFG_LWID_DG_ENB",
9932         "HFI1_IB_CFG_LWID_ENB",
9933         "HFI1_IB_CFG_LWID",
9934         "HFI1_IB_CFG_SPD_ENB",
9935         "HFI1_IB_CFG_SPD",
9936         "HFI1_IB_CFG_RXPOL_ENB",
9937         "HFI1_IB_CFG_LREV_ENB",
9938         "HFI1_IB_CFG_LINKLATENCY",
9939         "HFI1_IB_CFG_HRTBT",
9940         "HFI1_IB_CFG_OP_VLS",
9941         "HFI1_IB_CFG_VL_HIGH_CAP",
9942         "HFI1_IB_CFG_VL_LOW_CAP",
9943         "HFI1_IB_CFG_OVERRUN_THRESH",
9944         "HFI1_IB_CFG_PHYERR_THRESH",
9945         "HFI1_IB_CFG_LINKDEFAULT",
9946         "HFI1_IB_CFG_PKEYS",
9947         "HFI1_IB_CFG_MTU",
9948         "HFI1_IB_CFG_LSTATE",
9949         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9950         "HFI1_IB_CFG_PMA_TICKS",
9951         "HFI1_IB_CFG_PORT"
9952 };
9953
9954 static const char *ib_cfg_name(int which)
9955 {
9956         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9957                 return "invalid";
9958         return ib_cfg_name_strings[which];
9959 }
9960
9961 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9962 {
9963         struct hfi1_devdata *dd = ppd->dd;
9964         int val = 0;
9965
9966         switch (which) {
9967         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9968                 val = ppd->link_width_enabled;
9969                 break;
9970         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9971                 val = ppd->link_width_active;
9972                 break;
9973         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9974                 val = ppd->link_speed_enabled;
9975                 break;
9976         case HFI1_IB_CFG_SPD: /* current Link speed */
9977                 val = ppd->link_speed_active;
9978                 break;
9979
9980         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9981         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9982         case HFI1_IB_CFG_LINKLATENCY:
9983                 goto unimplemented;
9984
9985         case HFI1_IB_CFG_OP_VLS:
9986                 val = ppd->actual_vls_operational;
9987                 break;
9988         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9989                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9990                 break;
9991         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9992                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9993                 break;
9994         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9995                 val = ppd->overrun_threshold;
9996                 break;
9997         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9998                 val = ppd->phy_error_threshold;
9999                 break;
10000         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10001                 val = HLS_DEFAULT;
10002                 break;
10003
10004         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10005         case HFI1_IB_CFG_PMA_TICKS:
10006         default:
10007 unimplemented:
10008                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10009                         dd_dev_info(
10010                                 dd,
10011                                 "%s: which %s: not implemented\n",
10012                                 __func__,
10013                                 ib_cfg_name(which));
10014                 break;
10015         }
10016
10017         return val;
10018 }
10019
10020 /*
10021  * The largest MAD packet size.
10022  */
10023 #define MAX_MAD_PACKET 2048
10024
10025 /*
10026  * Return the maximum header bytes that can go on the _wire_
10027  * for this device. This count includes the ICRC which is
10028  * not part of the packet held in memory but is appended
10029  * by the HW.
10030  * This is dependent on the device's receive header entry size.
10031  * HFI allows this to be set per-receive context, but the
10032  * driver presently enforces a global value.
10033  */
10034 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10035 {
10036         /*
10037          * The maximum non-payload (MTU) bytes in LRH.PktLen are
10038          * the Receive Header Entry Size minus the PBC (or RHF) size
10039          * plus one DW for the ICRC appended by HW.
10040          *
10041  * dd->rcd[0]->rcvhdrqentsize is in DW.
10042  * We use rcd[0] as all contexts will have the same value. Also,
10043          * the first kernel context would have been allocated by now so
10044          * we are guaranteed a valid value.
10045          */
10046         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10047 }
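/*
 * Worked example (illustrative, assuming the usual 32 DW receive header
 * entry size): (32 - 2 + 1) << 2 = 124 bytes of non-payload data counted
 * in LRH.PktLen.
 */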
10048
10049 /*
10050  * Set Send Length
10051  * @ppd - per port data
10052  *
10053  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10054  * registers compare against LRH.PktLen, so use the max bytes included
10055  * in the LRH.
10056  *
10057  * This routine changes all VL values except VL15, which it maintains at
10058  * the same value.
10059  */
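/*
 * For example (illustrative numbers, continuing the 124-byte max_hb case
 * above): a VL with an 8192-byte MTU has (8192 + 124) >> 2 = 2079 DW
 * programmed into its SendLenCheck field.
 */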
10060 static void set_send_length(struct hfi1_pportdata *ppd)
10061 {
10062         struct hfi1_devdata *dd = ppd->dd;
10063         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10064         u32 maxvlmtu = dd->vld[15].mtu;
10065         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10066                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10067                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10068         int i, j;
10069         u32 thres;
10070
10071         for (i = 0; i < ppd->vls_supported; i++) {
10072                 if (dd->vld[i].mtu > maxvlmtu)
10073                         maxvlmtu = dd->vld[i].mtu;
10074                 if (i <= 3)
10075                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10076                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10077                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10078                 else
10079                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10080                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10081                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10082         }
10083         write_csr(dd, SEND_LEN_CHECK0, len1);
10084         write_csr(dd, SEND_LEN_CHECK1, len2);
10085         /* adjust kernel credit return thresholds based on new MTUs */
10086         /* all kernel receive contexts have the same hdrqentsize */
10087         for (i = 0; i < ppd->vls_supported; i++) {
10088                 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10089                             sc_mtu_to_threshold(dd->vld[i].sc,
10090                                                 dd->vld[i].mtu,
10091                                                 dd->rcd[0]->rcvhdrqentsize));
10092                 for (j = 0; j < INIT_SC_PER_VL; j++)
10093                         sc_set_cr_threshold(
10094                                         pio_select_send_context_vl(dd, j, i),
10095                                             thres);
10096         }
10097         thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10098                     sc_mtu_to_threshold(dd->vld[15].sc,
10099                                         dd->vld[15].mtu,
10100                                         dd->rcd[0]->rcvhdrqentsize));
10101         sc_set_cr_threshold(dd->vld[15].sc, thres);
10102
10103         /* Adjust maximum MTU for the port in DC */
10104         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10105                 (ilog2(maxvlmtu >> 8) + 1);
10106         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10107         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10108         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10109                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10110         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10111 }
10112
10113 static void set_lidlmc(struct hfi1_pportdata *ppd)
10114 {
10115         int i;
10116         u64 sreg = 0;
10117         struct hfi1_devdata *dd = ppd->dd;
10118         u32 mask = ~((1U << ppd->lmc) - 1);
10119         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10120         u32 lid;
10121
10122         /*
10123          * Program 0 in CSR if port lid is extended. This prevents
10124          * 9B packets from being sent out for large lids.
10125          */
10126         lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10127         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10128                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10129         c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10130                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10131               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10132                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10133         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10134
10135         /*
10136          * Iterate over all the send contexts and set their SLID check
10137          */
10138         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10139                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10140                (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10141                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
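        /*
         * Illustrative example: with lid 0x1000 and lmc 2, mask is ~0x3,
         * so the check value is 0x1000 and SLIDs 0x1000-0x1003 pass the
         * per-send-context SLID check written below.
         */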
10142
10143         for (i = 0; i < chip_send_contexts(dd); i++) {
10144                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10145                           i, (u32)sreg);
10146                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10147         }
10148
10149         /* Now we have to do the same thing for the sdma engines */
10150         sdma_update_lmc(dd, mask, lid);
10151 }
10152
10153 static const char *state_completed_string(u32 completed)
10154 {
10155         static const char * const state_completed[] = {
10156                 "EstablishComm",
10157                 "OptimizeEQ",
10158                 "VerifyCap"
10159         };
10160
10161         if (completed < ARRAY_SIZE(state_completed))
10162                 return state_completed[completed];
10163
10164         return "unknown";
10165 }
10166
10167 static const char all_lanes_dead_timeout_expired[] =
10168         "All lanes were inactive – was the interconnect media removed?";
10169 static const char tx_out_of_policy[] =
10170         "Passing lanes on local port do not meet the local link width policy";
10171 static const char no_state_complete[] =
10172         "State timeout occurred before link partner completed the state";
10173 static const char * const state_complete_reasons[] = {
10174         [0x00] = "Reason unknown",
10175         [0x01] = "Link was halted by driver, refer to LinkDownReason",
10176         [0x02] = "Link partner reported failure",
10177         [0x10] = "Unable to achieve frame sync on any lane",
10178         [0x11] =
10179           "Unable to find a common bit rate with the link partner",
10180         [0x12] =
10181           "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10182         [0x13] =
10183           "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10184         [0x14] = no_state_complete,
10185         [0x15] =
10186           "State timeout occurred before link partner identified equalization presets",
10187         [0x16] =
10188           "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10189         [0x17] = tx_out_of_policy,
10190         [0x20] = all_lanes_dead_timeout_expired,
10191         [0x21] =
10192           "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10193         [0x22] = no_state_complete,
10194         [0x23] =
10195           "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10196         [0x24] = tx_out_of_policy,
10197         [0x30] = all_lanes_dead_timeout_expired,
10198         [0x31] =
10199           "State timeout occurred waiting for host to process received frames",
10200         [0x32] = no_state_complete,
10201         [0x33] =
10202           "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10203         [0x34] = tx_out_of_policy,
10204         [0x35] = "Negotiated link width is mutually exclusive",
10205         [0x36] =
10206           "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10207         [0x37] = "Unable to resolve secure data exchange",
10208 };
10209
10210 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10211                                                      u32 code)
10212 {
10213         const char *str = NULL;
10214
10215         if (code < ARRAY_SIZE(state_complete_reasons))
10216                 str = state_complete_reasons[code];
10217
10218         if (str)
10219                 return str;
10220         return "Reserved";
10221 }
10222
10223 /* describe the given last state complete frame */
10224 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10225                                   const char *prefix)
10226 {
10227         struct hfi1_devdata *dd = ppd->dd;
10228         u32 success;
10229         u32 state;
10230         u32 reason;
10231         u32 lanes;
10232
10233         /*
10234          * Decode frame:
10235          *  [ 0: 0] - success
10236          *  [ 3: 1] - state
10237          *  [ 7: 4] - next state timeout
10238          *  [15: 8] - reason code
10239          *  [31:16] - lanes
10240          */
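        /*
         * Illustrative decode of a hypothetical frame value 0x000f2403:
         * success = 1, state = 1 (OptimizeEQ), reason = 0x24
         * (tx_out_of_policy) and a passing lane mask of 0x000f.
         */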
10241         success = frame & 0x1;
10242         state = (frame >> 1) & 0x7;
10243         reason = (frame >> 8) & 0xff;
10244         lanes = (frame >> 16) & 0xffff;
10245
10246         dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10247                    prefix, frame);
10248         dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10249                    state_completed_string(state), state);
10250         dd_dev_err(dd, "    state successfully completed: %s\n",
10251                    success ? "yes" : "no");
10252         dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10253                    reason, state_complete_reason_code_string(ppd, reason));
10254         dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10255 }
10256
10257 /*
10258  * Read the last state complete frames and explain them.  This routine
10259  * expects to be called if the link went down during link negotiation
10260  * and initialization (LNI).  That is, anywhere between polling and link up.
10261  */
10262 static void check_lni_states(struct hfi1_pportdata *ppd)
10263 {
10264         u32 last_local_state;
10265         u32 last_remote_state;
10266
10267         read_last_local_state(ppd->dd, &last_local_state);
10268         read_last_remote_state(ppd->dd, &last_remote_state);
10269
10270         /*
10271          * Don't report anything if there is nothing to report.  A value of
10272          * 0 means the link was taken down while polling and there was no
10273          * training in progress.
10274          */
10275         if (last_local_state == 0 && last_remote_state == 0)
10276                 return;
10277
10278         decode_state_complete(ppd, last_local_state, "transmitted");
10279         decode_state_complete(ppd, last_remote_state, "received");
10280 }
10281
10282 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10283 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10284 {
10285         u64 reg;
10286         unsigned long timeout;
10287
10288         /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10289         timeout = jiffies + msecs_to_jiffies(wait_ms);
10290         while (1) {
10291                 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10292                 if (reg)
10293                         break;
10294                 if (time_after(jiffies, timeout)) {
10295                         dd_dev_err(dd,
10296                                    "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10297                         return -ETIMEDOUT;
10298                 }
10299                 udelay(2);
10300         }
10301         return 0;
10302 }
10303
10304 /* called when the logical link state is not down as it should be */
10305 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10306 {
10307         struct hfi1_devdata *dd = ppd->dd;
10308
10309         /*
10310          * Bring link up in LCB loopback
10311          */
10312         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10313         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10314                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10315
10316         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10317         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10318         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10319         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10320
10321         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10322         (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10323         udelay(3);
10324         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10325         write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10326
10327         wait_link_transfer_active(dd, 100);
10328
10329         /*
10330          * Bring the link down again.
10331          */
10332         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10333         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10334         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10335
10336         dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10337 }
10338
10339 /*
10340  * Helper for set_link_state().  Do not call except from that routine.
10341  * Expects ppd->hls_mutex to be held.
10342  *
10343  * @rem_reason value to be sent to the neighbor
10344  *
10345  * LinkDownReasons only set if transition succeeds.
10346  */
10347 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10348 {
10349         struct hfi1_devdata *dd = ppd->dd;
10350         u32 previous_state;
10351         int offline_state_ret;
10352         int ret;
10353
10354         update_lcb_cache(dd);
10355
10356         previous_state = ppd->host_link_state;
10357         ppd->host_link_state = HLS_GOING_OFFLINE;
10358
10359         /* start offline transition */
10360         ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10361
10362         if (ret != HCMD_SUCCESS) {
10363                 dd_dev_err(dd,
10364                            "Failed to transition to Offline link state, return %d\n",
10365                            ret);
10366                 return -EINVAL;
10367         }
10368         if (ppd->offline_disabled_reason ==
10369                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10370                 ppd->offline_disabled_reason =
10371                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10372
10373         offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10374         if (offline_state_ret < 0)
10375                 return offline_state_ret;
10376
10377         /* Disabling AOC transmitters */
10378         if (ppd->port_type == PORT_TYPE_QSFP &&
10379             ppd->qsfp_info.limiting_active &&
10380             qsfp_mod_present(ppd)) {
10381                 int ret;
10382
10383                 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10384                 if (ret == 0) {
10385                         set_qsfp_tx(ppd, 0);
10386                         release_chip_resource(dd, qsfp_resource(dd));
10387                 } else {
10388                         /* not fatal, but should warn */
10389                         dd_dev_err(dd,
10390                                    "Unable to acquire lock to turn off QSFP TX\n");
10391                 }
10392         }
10393
10394         /*
10395          * Wait for the offline.Quiet transition if it hasn't happened yet. It
10396          * can take a while for the link to go down.
10397          */
10398         if (offline_state_ret != PLS_OFFLINE_QUIET) {
10399                 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10400                 if (ret < 0)
10401                         return ret;
10402         }
10403
10404         /*
10405          * Now in charge of LCB - must be after the physical state is
10406          * offline.quiet and before host_link_state is changed.
10407          */
10408         set_host_lcb_access(dd);
10409         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10410
10411         /* make sure the logical state is also down */
10412         ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10413         if (ret)
10414                 force_logical_link_state_down(ppd);
10415
10416         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10417         update_statusp(ppd, IB_PORT_DOWN);
10418
10419         /*
10420          * The LNI has a mandatory wait time after the physical state
10421          * moves to Offline.Quiet.  The wait time may be different
10422          * depending on how the link went down.  The 8051 firmware
10423          * will observe the needed wait time and only move to ready
10424          * when that is completed.  The largest of the quiet timeouts
10425          * is 6s, so wait that long and then at least 0.5s more for
10426          * other transitions, and another 0.5s for a buffer.
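               * (6 + 0.5 + 0.5 = 7 seconds, hence the 7000 ms wait below.)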
10427          */
10428         ret = wait_fm_ready(dd, 7000);
10429         if (ret) {
10430                 dd_dev_err(dd,
10431                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10432                 /* state is really offline, so make it so */
10433                 ppd->host_link_state = HLS_DN_OFFLINE;
10434                 return ret;
10435         }
10436
10437         /*
10438          * The state is now offline and the 8051 is ready to accept host
10439          * requests.
10440          *      - change our state
10441          *      - notify others if we were previously in a linkup state
10442          */
10443         ppd->host_link_state = HLS_DN_OFFLINE;
10444         if (previous_state & HLS_UP) {
10445                 /* went down while link was up */
10446                 handle_linkup_change(dd, 0);
10447         } else if (previous_state
10448                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10449                 /* went down while attempting link up */
10450                 check_lni_states(ppd);
10451
10452                 /* The QSFP doesn't need to be reset on LNI failure */
10453                 ppd->qsfp_info.reset_needed = 0;
10454         }
10455
10456         /* the active link width (downgrade) is 0 on link down */
10457         ppd->link_width_active = 0;
10458         ppd->link_width_downgrade_tx_active = 0;
10459         ppd->link_width_downgrade_rx_active = 0;
10460         ppd->current_egress_rate = 0;
10461         return 0;
10462 }
10463
10464 /* return the link state name */
10465 static const char *link_state_name(u32 state)
10466 {
10467         const char *name;
10468         int n = ilog2(state);
10469         static const char * const names[] = {
10470                 [__HLS_UP_INIT_BP]       = "INIT",
10471                 [__HLS_UP_ARMED_BP]      = "ARMED",
10472                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10473                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10474                 [__HLS_DN_POLL_BP]       = "POLL",
10475                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
10476                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10477                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10478                 [__HLS_GOING_UP_BP]      = "GOING_UP",
10479                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10480                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10481         };
10482
10483         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10484         return name ? name : "unknown";
10485 }
10486
10487 /* return the link state reason name */
10488 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10489 {
10490         if (state == HLS_UP_INIT) {
10491                 switch (ppd->linkinit_reason) {
10492                 case OPA_LINKINIT_REASON_LINKUP:
10493                         return "(LINKUP)";
10494                 case OPA_LINKINIT_REASON_FLAPPING:
10495                         return "(FLAPPING)";
10496                 case OPA_LINKINIT_OUTSIDE_POLICY:
10497                         return "(OUTSIDE_POLICY)";
10498                 case OPA_LINKINIT_QUARANTINED:
10499                         return "(QUARANTINED)";
10500                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10501                         return "(INSUFIC_CAPABILITY)";
10502                 default:
10503                         break;
10504                 }
10505         }
10506         return "";
10507 }
10508
10509 /*
10510  * driver_pstate - convert the driver's notion of a port's
10511  * state (an HLS_*) into a physical state (an {IB,OPA}_PORTPHYSSTATE_*).
10512  * Return -1 (converted to a u32) to indicate error.
10513  */
10514 u32 driver_pstate(struct hfi1_pportdata *ppd)
10515 {
10516         switch (ppd->host_link_state) {
10517         case HLS_UP_INIT:
10518         case HLS_UP_ARMED:
10519         case HLS_UP_ACTIVE:
10520                 return IB_PORTPHYSSTATE_LINKUP;
10521         case HLS_DN_POLL:
10522                 return IB_PORTPHYSSTATE_POLLING;
10523         case HLS_DN_DISABLE:
10524                 return IB_PORTPHYSSTATE_DISABLED;
10525         case HLS_DN_OFFLINE:
10526                 return OPA_PORTPHYSSTATE_OFFLINE;
10527         case HLS_VERIFY_CAP:
10528                 return IB_PORTPHYSSTATE_TRAINING;
10529         case HLS_GOING_UP:
10530                 return IB_PORTPHYSSTATE_TRAINING;
10531         case HLS_GOING_OFFLINE:
10532                 return OPA_PORTPHYSSTATE_OFFLINE;
10533         case HLS_LINK_COOLDOWN:
10534                 return OPA_PORTPHYSSTATE_OFFLINE;
10535         case HLS_DN_DOWNDEF:
10536         default:
10537                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10538                            ppd->host_link_state);
10539                 return -1;
10540         }
10541 }
10542
10543 /*
10544  * driver_lstate - convert the driver's notion of a port's
10545  * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10546  * (converted to a u32) to indicate error.
10547  */
10548 u32 driver_lstate(struct hfi1_pportdata *ppd)
10549 {
10550         if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10551                 return IB_PORT_DOWN;
10552
10553         switch (ppd->host_link_state & HLS_UP) {
10554         case HLS_UP_INIT:
10555                 return IB_PORT_INIT;
10556         case HLS_UP_ARMED:
10557                 return IB_PORT_ARMED;
10558         case HLS_UP_ACTIVE:
10559                 return IB_PORT_ACTIVE;
10560         default:
10561                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10562                            ppd->host_link_state);
10563                 return -1;
10564         }
10565 }
10566
10567 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10568                           u8 neigh_reason, u8 rem_reason)
10569 {
10570         if (ppd->local_link_down_reason.latest == 0 &&
10571             ppd->neigh_link_down_reason.latest == 0) {
10572                 ppd->local_link_down_reason.latest = lcl_reason;
10573                 ppd->neigh_link_down_reason.latest = neigh_reason;
10574                 ppd->remote_link_down_reason = rem_reason;
10575         }
10576 }
10577
10578 /**
10579  * data_vls_operational() - Verify if data VL BCT credits and MTU
10580  *                          are both set.
10581  * @ppd: pointer to hfi1_pportdata structure
10582  *
10583  * Return: true - OK, false - otherwise.
10584  */
10585 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10586 {
10587         int i;
10588         u64 reg;
10589
10590         if (!ppd->actual_vls_operational)
10591                 return false;
10592
10593         for (i = 0; i < ppd->vls_supported; i++) {
10594                 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10595                 if ((reg && !ppd->dd->vld[i].mtu) ||
10596                     (!reg && ppd->dd->vld[i].mtu))
10597                         return false;
10598         }
10599
10600         return true;
10601 }
10602
10603 /*
10604  * Change the physical and/or logical link state.
10605  *
10606  * Do not call this routine while inside an interrupt.  It contains
10607  * calls to routines that can take multiple seconds to finish.
10608  *
10609  * Returns 0 on success, -errno on failure.
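       *
       * Illustrative only (not a sequence lifted from this file): a caller
       * that wants to bounce the link might do
       *
       *      set_link_state(ppd, HLS_DN_OFFLINE);
       *      set_link_state(ppd, HLS_DN_POLL);
       *
       * checking the return value of each call.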
10610  */
10611 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10612 {
10613         struct hfi1_devdata *dd = ppd->dd;
10614         struct ib_event event = {.device = NULL};
10615         int ret1, ret = 0;
10616         int orig_new_state, poll_bounce;
10617
10618         mutex_lock(&ppd->hls_lock);
10619
10620         orig_new_state = state;
10621         if (state == HLS_DN_DOWNDEF)
10622                 state = HLS_DEFAULT;
10623
10624         /* interpret poll -> poll as a link bounce */
10625         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10626                       state == HLS_DN_POLL;
10627
10628         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10629                     link_state_name(ppd->host_link_state),
10630                     link_state_name(orig_new_state),
10631                     poll_bounce ? "(bounce) " : "",
10632                     link_state_reason_name(ppd, state));
10633
10634         /*
10635          * If we're going to a (HLS_*) link state that implies the logical
10636          * link state is neither IB_PORT_ARMED nor IB_PORT_ACTIVE, then
10637          * reset is_sm_config_started to 0.
10638          */
10639         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10640                 ppd->is_sm_config_started = 0;
10641
10642         /*
10643          * Do nothing if the states match.  Let a poll to poll link bounce
10644          * go through.
10645          */
10646         if (ppd->host_link_state == state && !poll_bounce)
10647                 goto done;
10648
10649         switch (state) {
10650         case HLS_UP_INIT:
10651                 if (ppd->host_link_state == HLS_DN_POLL &&
10652                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10653                         /*
10654                          * Quick link up jumps from polling to here.
10655                          *
10656                          * Whether in normal or loopback mode, the
10657                          * simulator jumps from polling to link up.
10658                          * Accept that here.
10659                          */
10660                         /* OK */
10661                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10662                         goto unexpected;
10663                 }
10664
10665                 /*
10666                  * Wait for Link_Up physical state.
10667                  * Physical and Logical states should already be
10668                  * transitioned to LinkUp and LinkInit, respectively.
10669                  */
10670                 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10671                 if (ret) {
10672                         dd_dev_err(dd,
10673                                    "%s: physical state did not change to LINK-UP\n",
10674                                    __func__);
10675                         break;
10676                 }
10677
10678                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10679                 if (ret) {
10680                         dd_dev_err(dd,
10681                                    "%s: logical state did not change to INIT\n",
10682                                    __func__);
10683                         break;
10684                 }
10685
10686                 /* clear old transient LINKINIT_REASON code */
10687                 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10688                         ppd->linkinit_reason =
10689                                 OPA_LINKINIT_REASON_LINKUP;
10690
10691                 /* enable the port */
10692                 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10693
10694                 handle_linkup_change(dd, 1);
10695                 pio_kernel_linkup(dd);
10696
10697                 /*
10698                  * After link up, a new link width will have been set.
10699                  * Update the xmit counters with regards to the new
10700                  * link width.
10701                  */
10702                 update_xmit_counters(ppd, ppd->link_width_active);
10703
10704                 ppd->host_link_state = HLS_UP_INIT;
10705                 update_statusp(ppd, IB_PORT_INIT);
10706                 break;
10707         case HLS_UP_ARMED:
10708                 if (ppd->host_link_state != HLS_UP_INIT)
10709                         goto unexpected;
10710
10711                 if (!data_vls_operational(ppd)) {
10712                         dd_dev_err(dd,
10713                                    "%s: Invalid data VL credits or mtu\n",
10714                                    __func__);
10715                         ret = -EINVAL;
10716                         break;
10717                 }
10718
10719                 set_logical_state(dd, LSTATE_ARMED);
10720                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10721                 if (ret) {
10722                         dd_dev_err(dd,
10723                                    "%s: logical state did not change to ARMED\n",
10724                                    __func__);
10725                         break;
10726                 }
10727                 ppd->host_link_state = HLS_UP_ARMED;
10728                 update_statusp(ppd, IB_PORT_ARMED);
10729                 /*
10730                  * The simulator does not currently implement SMA messages,
10731                  * so neighbor_normal is not set.  Set it here when we first
10732                  * move to Armed.
10733                  */
10734                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10735                         ppd->neighbor_normal = 1;
10736                 break;
10737         case HLS_UP_ACTIVE:
10738                 if (ppd->host_link_state != HLS_UP_ARMED)
10739                         goto unexpected;
10740
10741                 set_logical_state(dd, LSTATE_ACTIVE);
10742                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10743                 if (ret) {
10744                         dd_dev_err(dd,
10745                                    "%s: logical state did not change to ACTIVE\n",
10746                                    __func__);
10747                 } else {
10748                         /* tell all engines to go running */
10749                         sdma_all_running(dd);
10750                         ppd->host_link_state = HLS_UP_ACTIVE;
10751                         update_statusp(ppd, IB_PORT_ACTIVE);
10752
10753                         /* Signal the IB layer that the port has gone active */
10754                         event.device = &dd->verbs_dev.rdi.ibdev;
10755                         event.element.port_num = ppd->port;
10756                         event.event = IB_EVENT_PORT_ACTIVE;
10757                 }
10758                 break;
10759         case HLS_DN_POLL:
10760                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10761                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10762                     dd->dc_shutdown)
10763                         dc_start(dd);
10764                 /* Hand LED control to the DC */
10765                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10766
10767                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10768                         u8 tmp = ppd->link_enabled;
10769
10770                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10771                         if (ret) {
10772                                 ppd->link_enabled = tmp;
10773                                 break;
10774                         }
10775                         ppd->remote_link_down_reason = 0;
10776
10777                         if (ppd->driver_link_ready)
10778                                 ppd->link_enabled = 1;
10779                 }
10780
10781                 set_all_slowpath(ppd->dd);
10782                 ret = set_local_link_attributes(ppd);
10783                 if (ret)
10784                         break;
10785
10786                 ppd->port_error_action = 0;
10787
10788                 if (quick_linkup) {
10789                         /* quick linkup does not go into polling */
10790                         ret = do_quick_linkup(dd);
10791                 } else {
10792                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10793                         if (!ret1)
10794                                 ret1 = wait_phys_link_out_of_offline(ppd,
10795                                                                      3000);
10796                         if (ret1 != HCMD_SUCCESS) {
10797                                 dd_dev_err(dd,
10798                                            "Failed to transition to Polling link state, return 0x%x\n",
10799                                            ret1);
10800                                 ret = -EINVAL;
10801                         }
10802                 }
10803
10804                 /*
10805                  * Change the host link state after requesting DC8051 to
10806                  * change its physical state so that we can ignore any
10807                  * interrupt with stale LNI(XX) error, which will not be
10808                  * cleared until DC8051 transitions to Polling state.
10809                  */
10810                 ppd->host_link_state = HLS_DN_POLL;
10811                 ppd->offline_disabled_reason =
10812                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10813                 /*
10814                  * If an error occurred above, go back to offline.  The
10815                  * caller may reschedule another attempt.
10816                  */
10817                 if (ret)
10818                         goto_offline(ppd, 0);
10819                 else
10820                         log_physical_state(ppd, PLS_POLLING);
10821                 break;
10822         case HLS_DN_DISABLE:
10823                 /* link is disabled */
10824                 ppd->link_enabled = 0;
10825
10826                 /* allow any state to transition to disabled */
10827
10828                 /* must transition to offline first */
10829                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10830                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10831                         if (ret)
10832                                 break;
10833                         ppd->remote_link_down_reason = 0;
10834                 }
10835
10836                 if (!dd->dc_shutdown) {
10837                         ret1 = set_physical_link_state(dd, PLS_DISABLED);
10838                         if (ret1 != HCMD_SUCCESS) {
10839                                 dd_dev_err(dd,
10840                                            "Failed to transition to Disabled link state, return 0x%x\n",
10841                                            ret1);
10842                                 ret = -EINVAL;
10843                                 break;
10844                         }
10845                         ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10846                         if (ret) {
10847                                 dd_dev_err(dd,
10848                                            "%s: physical state did not change to DISABLED\n",
10849                                            __func__);
10850                                 break;
10851                         }
10852                         dc_shutdown(dd);
10853                 }
10854                 ppd->host_link_state = HLS_DN_DISABLE;
10855                 break;
10856         case HLS_DN_OFFLINE:
10857                 if (ppd->host_link_state == HLS_DN_DISABLE)
10858                         dc_start(dd);
10859
10860                 /* allow any state to transition to offline */
10861                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10862                 if (!ret)
10863                         ppd->remote_link_down_reason = 0;
10864                 break;
10865         case HLS_VERIFY_CAP:
10866                 if (ppd->host_link_state != HLS_DN_POLL)
10867                         goto unexpected;
10868                 ppd->host_link_state = HLS_VERIFY_CAP;
10869                 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10870                 break;
10871         case HLS_GOING_UP:
10872                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10873                         goto unexpected;
10874
10875                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10876                 if (ret1 != HCMD_SUCCESS) {
10877                         dd_dev_err(dd,
10878                                    "Failed to transition to link up state, return 0x%x\n",
10879                                    ret1);
10880                         ret = -EINVAL;
10881                         break;
10882                 }
10883                 ppd->host_link_state = HLS_GOING_UP;
10884                 break;
10885
10886         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10887         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10888         default:
10889                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10890                             __func__, state);
10891                 ret = -EINVAL;
10892                 break;
10893         }
10894
10895         goto done;
10896
10897 unexpected:
10898         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10899                    __func__, link_state_name(ppd->host_link_state),
10900                    link_state_name(state));
10901         ret = -EINVAL;
10902
10903 done:
10904         mutex_unlock(&ppd->hls_lock);
10905
10906         if (event.device)
10907                 ib_dispatch_event(&event);
10908
10909         return ret;
10910 }
10911
10912 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10913 {
10914         u64 reg;
10915         int ret = 0;
10916
10917         switch (which) {
10918         case HFI1_IB_CFG_LIDLMC:
10919                 set_lidlmc(ppd);
10920                 break;
10921         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10922                 /*
10923                  * The VL Arbitrator high limit is sent in units of 4k
10924                  * bytes, while HFI stores it in units of 64 bytes.
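                       * For example, a val of 2 (2 * 4 KiB = 8 KiB) is stored
                       * as 128 units of 64 bytes.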
10925                  */
10926                 val *= 4096 / 64;
10927                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10928                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10929                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10930                 break;
10931         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10932                 /* HFI only supports POLL as the default link down state */
10933                 if (val != HLS_DN_POLL)
10934                         ret = -EINVAL;
10935                 break;
10936         case HFI1_IB_CFG_OP_VLS:
10937                 if (ppd->vls_operational != val) {
10938                         ppd->vls_operational = val;
10939                         if (!ppd->port)
10940                                 ret = -EINVAL;
10941                 }
10942                 break;
10943         /*
10944          * For link width, link width downgrade, and speed enable, always AND
10945          * the setting with what is actually supported.  This has two benefits.
10946          * First, enabled can't have unsupported values, no matter what the
10947          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10948          * "fill in with your supported value" have all the bits in the
10949          * field set, so simply ANDing with supported has the desired result.
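               * For example (illustrative), if the FM writes the all-ones
               * wildcard and only the 1X and 4X widths are supported, the
               * enabled value ends up as exactly the 1X|4X bits.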
10950          */
10951         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10952                 ppd->link_width_enabled = val & ppd->link_width_supported;
10953                 break;
10954         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10955                 ppd->link_width_downgrade_enabled =
10956                                 val & ppd->link_width_downgrade_supported;
10957                 break;
10958         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10959                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10960                 break;
10961         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10962                 /*
10963                  * HFI does not follow IB specs, save this value
10964                  * so we can report it, if asked.
10965                  */
10966                 ppd->overrun_threshold = val;
10967                 break;
10968         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10969                 /*
10970                  * HFI does not follow IB specs, save this value
10971                  * so we can report it, if asked.
10972                  */
10973                 ppd->phy_error_threshold = val;
10974                 break;
10975
10976         case HFI1_IB_CFG_MTU:
10977                 set_send_length(ppd);
10978                 break;
10979
10980         case HFI1_IB_CFG_PKEYS:
10981                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10982                         set_partition_keys(ppd);
10983                 break;
10984
10985         default:
10986                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10987                         dd_dev_info(ppd->dd,
10988                                     "%s: which %s, val 0x%x: not implemented\n",
10989                                     __func__, ib_cfg_name(which), val);
10990                 break;
10991         }
10992         return ret;
10993 }
10994
10995 /* begin functions related to vl arbitration table caching */
10996 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10997 {
10998         int i;
10999
11000         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11001                         VL_ARB_LOW_PRIO_TABLE_SIZE);
11002         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11003                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
11004
11005         /*
11006          * Note that we always return values directly from the
11007          * 'vl_arb_cache' (and do no CSR reads) in response to a
11008          * 'Get(VLArbTable)'. This is obviously correct after a
11009          * 'Set(VLArbTable)', since the cache will then be up to
11010          * date. But it's also correct prior to any 'Set(VLArbTable)'
11011          * since then both the cache, and the relevant h/w registers
11012          * will be zeroed.
11013          */
11014
11015         for (i = 0; i < MAX_PRIO_TABLE; i++)
11016                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11017 }
11018
11019 /*
11020  * vl_arb_lock_cache
11021  *
11022  * All other vl_arb_* functions should be called only after locking
11023  * the cache.
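       *
       * Illustrative usage (the same pattern fm_get_table() uses below):
       *
       *      vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
       *      vl_arb_get_cache(vlc, t);
       *      vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);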
11024  */
11025 static inline struct vl_arb_cache *
11026 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11027 {
11028         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11029                 return NULL;
11030         spin_lock(&ppd->vl_arb_cache[idx].lock);
11031         return &ppd->vl_arb_cache[idx];
11032 }
11033
11034 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11035 {
11036         spin_unlock(&ppd->vl_arb_cache[idx].lock);
11037 }
11038
11039 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11040                              struct ib_vl_weight_elem *vl)
11041 {
11042         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11043 }
11044
11045 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11046                              struct ib_vl_weight_elem *vl)
11047 {
11048         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11049 }
11050
11051 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11052                               struct ib_vl_weight_elem *vl)
11053 {
11054         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11055 }
11056
11057 /* end functions related to vl arbitration table caching */
11058
11059 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11060                           u32 size, struct ib_vl_weight_elem *vl)
11061 {
11062         struct hfi1_devdata *dd = ppd->dd;
11063         u64 reg;
11064         unsigned int i, is_up = 0;
11065         int drain, ret = 0;
11066
11067         mutex_lock(&ppd->hls_lock);
11068
11069         if (ppd->host_link_state & HLS_UP)
11070                 is_up = 1;
11071
11072         drain = !is_ax(dd) && is_up;
11073
11074         if (drain)
11075                 /*
11076                  * Before adjusting VL arbitration weights, empty per-VL
11077                  * FIFOs, otherwise a packet whose VL weight is being
11078                  * set to 0 could get stuck in a FIFO with no chance to
11079                  * egress.
11080                  */
11081                 ret = stop_drain_data_vls(dd);
11082
11083         if (ret) {
11084                 dd_dev_err(
11085                         dd,
11086                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11087                         __func__);
11088                 goto err;
11089         }
11090
11091         for (i = 0; i < size; i++, vl++) {
11092                 /*
11093                  * NOTE: The low priority shift and mask are used here, but
11094                  * they are the same for both the low and high registers.
11095                  */
11096                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11097                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11098                       | (((u64)vl->weight
11099                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11100                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11101                 write_csr(dd, target + (i * 8), reg);
11102         }
11103         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11104
11105         if (drain)
11106                 open_fill_data_vls(dd); /* reopen all VLs */
11107
11108 err:
11109         mutex_unlock(&ppd->hls_lock);
11110
11111         return ret;
11112 }
11113
11114 /*
11115  * Read one credit merge VL register.
11116  */
11117 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11118                            struct vl_limit *vll)
11119 {
11120         u64 reg = read_csr(dd, csr);
11121
11122         vll->dedicated = cpu_to_be16(
11123                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11124                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11125         vll->shared = cpu_to_be16(
11126                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11127                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11128 }
11129
11130 /*
11131  * Read the current credit merge limits.
11132  */
11133 static int get_buffer_control(struct hfi1_devdata *dd,
11134                               struct buffer_control *bc, u16 *overall_limit)
11135 {
11136         u64 reg;
11137         int i;
11138
11139         /* not all entries are filled in */
11140         memset(bc, 0, sizeof(*bc));
11141
11142         /* OPA and HFI have a 1-1 mapping */
11143         for (i = 0; i < TXE_NUM_DATA_VL; i++)
11144                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11145
11146         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11147         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11148
11149         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11150         bc->overall_shared_limit = cpu_to_be16(
11151                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11152                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11153         if (overall_limit)
11154                 *overall_limit = (reg
11155                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11156                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11157         return sizeof(struct buffer_control);
11158 }
11159
11160 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11161 {
11162         u64 reg;
11163         int i;
11164
11165         /* each register contains 16 SC->VLnt mappings, 4 bits each */
11166         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11167         for (i = 0; i < sizeof(u64); i++) {
11168                 u8 byte = *(((u8 *)&reg) + i);
11169
11170                 dp->vlnt[2 * i] = byte & 0xf;
11171                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11172         }
11173
11174         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11175         for (i = 0; i < sizeof(u64); i++) {
11176                 u8 byte = *(((u8 *)&reg) + i);
11177
11178                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11179                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11180         }
11181         return sizeof(struct sc2vlnt);
11182 }
11183
11184 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11185                               struct ib_vl_weight_elem *vl)
11186 {
11187         unsigned int i;
11188
11189         for (i = 0; i < nelems; i++, vl++) {
11190                 vl->vl = 0xf;
11191                 vl->weight = 0;
11192         }
11193 }
11194
11195 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11196 {
11197         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11198                   DC_SC_VL_VAL(15_0,
11199                                0, dp->vlnt[0] & 0xf,
11200                                1, dp->vlnt[1] & 0xf,
11201                                2, dp->vlnt[2] & 0xf,
11202                                3, dp->vlnt[3] & 0xf,
11203                                4, dp->vlnt[4] & 0xf,
11204                                5, dp->vlnt[5] & 0xf,
11205                                6, dp->vlnt[6] & 0xf,
11206                                7, dp->vlnt[7] & 0xf,
11207                                8, dp->vlnt[8] & 0xf,
11208                                9, dp->vlnt[9] & 0xf,
11209                                10, dp->vlnt[10] & 0xf,
11210                                11, dp->vlnt[11] & 0xf,
11211                                12, dp->vlnt[12] & 0xf,
11212                                13, dp->vlnt[13] & 0xf,
11213                                14, dp->vlnt[14] & 0xf,
11214                                15, dp->vlnt[15] & 0xf));
11215         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11216                   DC_SC_VL_VAL(31_16,
11217                                16, dp->vlnt[16] & 0xf,
11218                                17, dp->vlnt[17] & 0xf,
11219                                18, dp->vlnt[18] & 0xf,
11220                                19, dp->vlnt[19] & 0xf,
11221                                20, dp->vlnt[20] & 0xf,
11222                                21, dp->vlnt[21] & 0xf,
11223                                22, dp->vlnt[22] & 0xf,
11224                                23, dp->vlnt[23] & 0xf,
11225                                24, dp->vlnt[24] & 0xf,
11226                                25, dp->vlnt[25] & 0xf,
11227                                26, dp->vlnt[26] & 0xf,
11228                                27, dp->vlnt[27] & 0xf,
11229                                28, dp->vlnt[28] & 0xf,
11230                                29, dp->vlnt[29] & 0xf,
11231                                30, dp->vlnt[30] & 0xf,
11232                                31, dp->vlnt[31] & 0xf));
11233 }
11234
11235 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11236                         u16 limit)
11237 {
11238         if (limit != 0)
11239                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11240                             what, (int)limit, idx);
11241 }
11242
11243 /* change only the shared limit portion of SendCmGlobalCredit */
11244 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11245 {
11246         u64 reg;
11247
11248         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11249         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11250         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11251         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11252 }
11253
11254 /* change only the total credit limit portion of SendCmGlobalCredit */
11255 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11256 {
11257         u64 reg;
11258
11259         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11260         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11261         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11262         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11263 }
11264
11265 /* set the given per-VL shared limit */
11266 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11267 {
11268         u64 reg;
11269         u32 addr;
11270
11271         if (vl < TXE_NUM_DATA_VL)
11272                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11273         else
11274                 addr = SEND_CM_CREDIT_VL15;
11275
11276         reg = read_csr(dd, addr);
11277         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11278         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11279         write_csr(dd, addr, reg);
11280 }
11281
11282 /* set the given per-VL dedicated limit */
11283 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11284 {
11285         u64 reg;
11286         u32 addr;
11287
11288         if (vl < TXE_NUM_DATA_VL)
11289                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11290         else
11291                 addr = SEND_CM_CREDIT_VL15;
11292
11293         reg = read_csr(dd, addr);
11294         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11295         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11296         write_csr(dd, addr, reg);
11297 }
11298
11299 /* spin until the given per-VL status mask bits clear */
11300 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11301                                      const char *which)
11302 {
11303         unsigned long timeout;
11304         u64 reg;
11305
11306         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11307         while (1) {
11308                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11309
11310                 if (reg == 0)
11311                         return; /* success */
11312                 if (time_after(jiffies, timeout))
11313                         break;          /* timed out */
11314                 udelay(1);
11315         }
11316
11317         dd_dev_err(dd,
11318                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11319                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11320         /*
11321          * If this occurs, it is likely there was a credit loss on the link.
11322          * The only recovery from that is a link bounce.
11323          */
11324         dd_dev_err(dd,
11325                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11326 }
11327
11328 /*
11329  * The number of credits on the VLs may be changed while everything
11330  * is "live", but the following algorithm must be followed due to
11331  * how the hardware is actually implemented.  In particular,
11332  * Return_Credit_Status[] is the only correct status check.
11333  *
11334  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11335  *     set Global_Shared_Credit_Limit = 0
11336  *     use_all_vl = 1
11337  * mask0 = all VLs that are changing either dedicated or shared limits
11338  * set Shared_Limit[mask0] = 0
11339  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11340  * if (changing any dedicated limit)
11341  *     mask1 = all VLs that are lowering dedicated limits
11342  *     lower Dedicated_Limit[mask1]
11343  *     spin until Return_Credit_Status[mask1] == 0
11344  *     raise Dedicated_Limits
11345  * raise Shared_Limits
11346  * raise Global_Shared_Credit_Limit
11347  *
11348  * lower = if the new limit is lower, set the limit to the new value
11349  * raise = if the new limit is higher than the current value (may be changed
11350  *      earlier in the algorithm), set the new limit to the new value
11351  */
11352 int set_buffer_control(struct hfi1_pportdata *ppd,
11353                        struct buffer_control *new_bc)
11354 {
11355         struct hfi1_devdata *dd = ppd->dd;
11356         u64 changing_mask, ld_mask, stat_mask;
11357         int change_count;
11358         int i, use_all_mask;
11359         int this_shared_changing;
11360         int vl_count = 0, ret;
11361         /*
11362          * A0: add the variable any_shared_limit_changing below and in the
11363          * algorithm above.  If removing A0 support, it can be removed.
11364          */
11365         int any_shared_limit_changing;
11366         struct buffer_control cur_bc;
11367         u8 changing[OPA_MAX_VLS];
11368         u8 lowering_dedicated[OPA_MAX_VLS];
11369         u16 cur_total;
11370         u32 new_total = 0;
11371         const u64 all_mask =
11372         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11373          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11374          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11375          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11376          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11377          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11378          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11379          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11380          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11381
11382 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11383 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
11384
11385         /* find the new total credits, do sanity check on unused VLs */
11386         for (i = 0; i < OPA_MAX_VLS; i++) {
11387                 if (valid_vl(i)) {
11388                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11389                         continue;
11390                 }
11391                 nonzero_msg(dd, i, "dedicated",
11392                             be16_to_cpu(new_bc->vl[i].dedicated));
11393                 nonzero_msg(dd, i, "shared",
11394                             be16_to_cpu(new_bc->vl[i].shared));
11395                 new_bc->vl[i].dedicated = 0;
11396                 new_bc->vl[i].shared = 0;
11397         }
11398         new_total += be16_to_cpu(new_bc->overall_shared_limit);
11399
11400         /* fetch the current values */
11401         get_buffer_control(dd, &cur_bc, &cur_total);
11402
11403         /*
11404          * Create the masks we will use.
11405          */
11406         memset(changing, 0, sizeof(changing));
11407         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11408         /*
11409          * NOTE: Assumes that the individual VL bits are adjacent and in
11410          * increasing order
11411          */
11412         stat_mask =
11413                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11414         changing_mask = 0;
11415         ld_mask = 0;
11416         change_count = 0;
11417         any_shared_limit_changing = 0;
11418         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11419                 if (!valid_vl(i))
11420                         continue;
11421                 this_shared_changing = new_bc->vl[i].shared
11422                                                 != cur_bc.vl[i].shared;
11423                 if (this_shared_changing)
11424                         any_shared_limit_changing = 1;
11425                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11426                     this_shared_changing) {
11427                         changing[i] = 1;
11428                         changing_mask |= stat_mask;
11429                         change_count++;
11430                 }
11431                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11432                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
11433                         lowering_dedicated[i] = 1;
11434                         ld_mask |= stat_mask;
11435                 }
11436         }
11437
11438         /* bracket the credit change with a total adjustment */
11439         if (new_total > cur_total)
11440                 set_global_limit(dd, new_total);
11441
11442         /*
11443          * Start the credit change algorithm.
11444          */
11445         use_all_mask = 0;
11446         if ((be16_to_cpu(new_bc->overall_shared_limit) <
11447              be16_to_cpu(cur_bc.overall_shared_limit)) ||
11448             (is_ax(dd) && any_shared_limit_changing)) {
11449                 set_global_shared(dd, 0);
11450                 cur_bc.overall_shared_limit = 0;
11451                 use_all_mask = 1;
11452         }
11453
11454         for (i = 0; i < NUM_USABLE_VLS; i++) {
11455                 if (!valid_vl(i))
11456                         continue;
11457
11458                 if (changing[i]) {
11459                         set_vl_shared(dd, i, 0);
11460                         cur_bc.vl[i].shared = 0;
11461                 }
11462         }
11463
11464         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11465                                  "shared");
11466
11467         if (change_count > 0) {
11468                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11469                         if (!valid_vl(i))
11470                                 continue;
11471
11472                         if (lowering_dedicated[i]) {
11473                                 set_vl_dedicated(dd, i,
11474                                                  be16_to_cpu(new_bc->
11475                                                              vl[i].dedicated));
11476                                 cur_bc.vl[i].dedicated =
11477                                                 new_bc->vl[i].dedicated;
11478                         }
11479                 }
11480
11481                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11482
11483                 /* now raise all dedicated that are going up */
11484                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11485                         if (!valid_vl(i))
11486                                 continue;
11487
11488                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
11489                                         be16_to_cpu(cur_bc.vl[i].dedicated))
11490                                 set_vl_dedicated(dd, i,
11491                                                  be16_to_cpu(new_bc->
11492                                                              vl[i].dedicated));
11493                 }
11494         }
11495
11496         /* next raise all shared that are going up */
11497         for (i = 0; i < NUM_USABLE_VLS; i++) {
11498                 if (!valid_vl(i))
11499                         continue;
11500
11501                 if (be16_to_cpu(new_bc->vl[i].shared) >
11502                                 be16_to_cpu(cur_bc.vl[i].shared))
11503                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11504         }
11505
11506         /* finally raise the global shared */
11507         if (be16_to_cpu(new_bc->overall_shared_limit) >
11508             be16_to_cpu(cur_bc.overall_shared_limit))
11509                 set_global_shared(dd,
11510                                   be16_to_cpu(new_bc->overall_shared_limit));
11511
11512         /* bracket the credit change with a total adjustment */
11513         if (new_total < cur_total)
11514                 set_global_limit(dd, new_total);
11515
11516         /*
11517          * Determine the actual number of operational VLS using the number of
11518          * dedicated and shared credits for each VL.
11519          */
11520         if (change_count > 0) {
11521                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11522                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11523                             be16_to_cpu(new_bc->vl[i].shared) > 0)
11524                                 vl_count++;
11525                 ppd->actual_vls_operational = vl_count;
11526                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11527                                     ppd->actual_vls_operational :
11528                                     ppd->vls_operational,
11529                                     NULL);
11530                 if (ret == 0)
11531                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11532                                            ppd->actual_vls_operational :
11533                                            ppd->vls_operational, NULL);
11534                 if (ret)
11535                         return ret;
11536         }
11537         return 0;
11538 }
11539
11540 /*
11541  * Read the given fabric manager table. Return the size of the
11542  * table (in bytes) on success, and a negative error code on
11543  * failure.
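       *
       * Illustrative call (the caller must provide a buffer large enough
       * for the requested table; the VL arbitration tables below are 256
       * bytes):
       *
       *      u8 buf[256];
       *      int size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, buf);
       *      if (size < 0)
       *              return size;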
11544  */
11545 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11547 {
11548         int size;
11549         struct vl_arb_cache *vlc;
11550
11551         switch (which) {
11552         case FM_TBL_VL_HIGH_ARB:
11553                 size = 256;
11554                 /*
11555                  * OPA specifies 128 elements (of 2 bytes each), though
11556                  * HFI supports only 16 elements in h/w.
11557                  */
11558                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11559                 vl_arb_get_cache(vlc, t);
11560                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11561                 break;
11562         case FM_TBL_VL_LOW_ARB:
11563                 size = 256;
11564                 /*
11565                  * OPA specifies 128 elements (of 2 bytes each), though
11566                  * HFI supports only 16 elements in h/w.
11567                  */
11568                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11569                 vl_arb_get_cache(vlc, t);
11570                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11571                 break;
11572         case FM_TBL_BUFFER_CONTROL:
11573                 size = get_buffer_control(ppd->dd, t, NULL);
11574                 break;
11575         case FM_TBL_SC2VLNT:
11576                 size = get_sc2vlnt(ppd->dd, t);
11577                 break;
11578         case FM_TBL_VL_PREEMPT_ELEMS:
11579                 size = 256;
11580                 /* OPA specifies 128 elements, of 2 bytes each */
11581                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11582                 break;
11583         case FM_TBL_VL_PREEMPT_MATRIX:
11584                 size = 256;
11585                 /*
11586                  * OPA specifies that this is the same size as the VL
11587                  * arbitration tables (i.e., 256 bytes).
11588                  */
11589                 break;
11590         default:
11591                 return -EINVAL;
11592         }
11593         return size;
11594 }
11595
11596 /*
11597  * Write the given fabric manager table.
11598  */
11599 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11600 {
11601         int ret = 0;
11602         struct vl_arb_cache *vlc;
11603
11604         switch (which) {
11605         case FM_TBL_VL_HIGH_ARB:
11606                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11607                 if (vl_arb_match_cache(vlc, t)) {
11608                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11609                         break;
11610                 }
11611                 vl_arb_set_cache(vlc, t);
11612                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11613                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11614                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11615                 break;
11616         case FM_TBL_VL_LOW_ARB:
11617                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11618                 if (vl_arb_match_cache(vlc, t)) {
11619                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11620                         break;
11621                 }
11622                 vl_arb_set_cache(vlc, t);
11623                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11624                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11625                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11626                 break;
11627         case FM_TBL_BUFFER_CONTROL:
11628                 ret = set_buffer_control(ppd, t);
11629                 break;
11630         case FM_TBL_SC2VLNT:
11631                 set_sc2vlnt(ppd->dd, t);
11632                 break;
11633         default:
11634                 ret = -EINVAL;
11635         }
11636         return ret;
11637 }
11638
11639 /*
11640  * Disable all data VLs.
11641  *
11642  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11643  */
11644 static int disable_data_vls(struct hfi1_devdata *dd)
11645 {
11646         if (is_ax(dd))
11647                 return 1;
11648
11649         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11650
11651         return 0;
11652 }
11653
11654 /*
11655  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11656  * Just re-enables all data VLs (the "fill" part happens
11657  * automatically - the name was chosen for symmetry with
11658  * stop_drain_data_vls()).
11659  *
11660  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11661  */
11662 int open_fill_data_vls(struct hfi1_devdata *dd)
11663 {
11664         if (is_ax(dd))
11665                 return 1;
11666
11667         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11668
11669         return 0;
11670 }
11671
11672 /*
11673  * drain_data_vls() - assumes that disable_data_vls() has been called;
11674  * waits for the occupancy of the per-VL FIFOs (for all contexts) and of
11675  * the SDMA engines to drop to 0.
11676  */
11677 static void drain_data_vls(struct hfi1_devdata *dd)
11678 {
11679         sc_wait(dd);
11680         sdma_wait(dd);
11681         pause_for_credit_return(dd);
11682 }
11683
11684 /*
11685  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11686  *
11687  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11688  * meant to be used like this:
11689  *
11690  * stop_drain_data_vls(dd);
11691  * // do things with per-VL resources
11692  * open_fill_data_vls(dd);
11693  */
11694 int stop_drain_data_vls(struct hfi1_devdata *dd)
11695 {
11696         int ret;
11697
11698         ret = disable_data_vls(dd);
11699         if (ret == 0)
11700                 drain_data_vls(dd);
11701
11702         return ret;
11703 }
11704
11705 /*
11706  * Convert a nanosecond time to a cclock count.  No matter how slow
11707  * the cclock, a non-zero ns will always have a non-zero result.
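       *
       * For example, ns_to_cclock(dd, 0) is 0, while any non-zero ns yields
       * at least 1 even when the integer division would truncate to 0.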
11708  */
11709 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11710 {
11711         u32 cclocks;
11712
11713         if (dd->icode == ICODE_FPGA_EMULATION)
11714                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11715         else  /* simulation pretends to be ASIC */
11716                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11717         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11718                 cclocks = 1;
11719         return cclocks;
11720 }
11721
11722 /*
11723  * Convert a cclock count to nanoseconds. No matter how slow
11724  * the cclock, a non-zero cclocks will always have a non-zero result.
11725  */
11726 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11727 {
11728         u32 ns;
11729
11730         if (dd->icode == ICODE_FPGA_EMULATION)
11731                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11732         else  /* simulation pretends to be ASIC */
11733                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11734         if (cclocks && !ns)
11735                 ns = 1;
11736         return ns;
11737 }
11738
11739 /*
11740  * Dynamically adjust the receive interrupt timeout for a context based on
11741  * incoming packet rate.
11742  *
11743  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11744  */
11745 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11746 {
11747         struct hfi1_devdata *dd = rcd->dd;
11748         u32 timeout = rcd->rcvavail_timeout;
11749
11750         /*
11751          * This algorithm doubles or halves the timeout depending on whether
11752  * the number of packets received in this interrupt was less than, or
11753  * greater than or equal to, the interrupt count.
11754          *
11755          * The calculations below do not allow a steady state to be achieved.
11756  * Only at the endpoints is it possible to have an unchanging
11757          * timeout.
11758          */
11759         if (npkts < rcv_intr_count) {
11760                 /*
11761                  * Not enough packets arrived before the timeout, adjust
11762                  * timeout downward.
11763                  */
11764                 if (timeout < 2) /* already at minimum? */
11765                         return;
11766                 timeout >>= 1;
11767         } else {
11768                 /*
11769                  * More than enough packets arrived before the timeout, adjust
11770                  * timeout upward.
11771                  */
11772                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11773                         return;
11774                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11775         }
11776
11777         rcd->rcvavail_timeout = timeout;
11778         /*
11779          * timeout cannot be larger than rcv_intr_timeout_csr which has already
11780          * been verified to be in range
11781          */
11782         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11783                         (u64)timeout <<
11784                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11785 }
11786
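/*
 * Update the receive header queue head (and, if requested, the eager
 * index head) for a context.  The interrupt timeout is adjusted first,
 * when asked, so the new value is in place when the hardware restarts
 * counting.
 */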
11787 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11788                     u32 intr_adjust, u32 npkts)
11789 {
11790         struct hfi1_devdata *dd = rcd->dd;
11791         u64 reg;
11792         u32 ctxt = rcd->ctxt;
11793
11794         /*
11795          * Need to write timeout register before updating RcvHdrHead to ensure
11796          * that a new value is used when the HW decides to restart counting.
11797          */
11798         if (intr_adjust)
11799                 adjust_rcv_timeout(rcd, npkts);
11800         if (updegr) {
11801                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11802                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11803                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11804         }
11805         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11806                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11807                         << RCV_HDR_HEAD_HEAD_SHIFT);
11808         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11809 }
11810
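/*
 * Return non-zero (true) if the receive header queue for the context is
 * empty, i.e. the head index equals the tail index.
 */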
11811 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11812 {
11813         u32 head, tail;
11814
11815         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11816                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11817
11818         if (rcd->rcvhdrtail_kvaddr)
11819                 tail = get_rcvhdrtail(rcd);
11820         else
11821                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11822
11823         return head == tail;
11824 }
11825
11826 /*
11827  * Context Control and Receive Array encoding for buffer size:
11828  *      0x0 invalid
11829  *      0x1   4 KB
11830  *      0x2   8 KB
11831  *      0x3  16 KB
11832  *      0x4  32 KB
11833  *      0x5  64 KB
11834  *      0x6 128 KB
11835  *      0x7 256 KB
11836  *      0x8 512 KB (Receive Array only)
11837  *      0x9   1 MB (Receive Array only)
11838  *      0xa   2 MB (Receive Array only)
11839  *
11840  *      0xb-0xf - reserved (Receive Array only)
11841  *
11842  *
11843  * This routine assumes that the value has already been sanity checked.
11844  */
11845 static u32 encoded_size(u32 size)
11846 {
11847         switch (size) {
11848         case   4 * 1024: return 0x1;
11849         case   8 * 1024: return 0x2;
11850         case  16 * 1024: return 0x3;
11851         case  32 * 1024: return 0x4;
11852         case  64 * 1024: return 0x5;
11853         case 128 * 1024: return 0x6;
11854         case 256 * 1024: return 0x7;
11855         case 512 * 1024: return 0x8;
11856         case   1 * 1024 * 1024: return 0x9;
11857         case   2 * 1024 * 1024: return 0xa;
11858         }
11859         return 0x1;     /* if invalid, go with the minimum size */
11860 }
11861
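/*
 * hfi1_rcvctrl - apply the receive control operations encoded in @op to
 * receive context @rcd, updating RcvCtxtCtrl and the related CSRs.
 */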
11862 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11863                   struct hfi1_ctxtdata *rcd)
11864 {
11865         u64 rcvctrl, reg;
11866         int did_enable = 0;
11867         u16 ctxt;
11868
11869         if (!rcd)
11870                 return;
11871
11872         ctxt = rcd->ctxt;
11873
11874         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11875
11876         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11877         /* if the context is already enabled, don't do the extra steps */
11878         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11879             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11880                 /* reset the tail and hdr addresses, and sequence count */
11881                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11882                                 rcd->rcvhdrq_dma);
11883                 if (rcd->rcvhdrtail_kvaddr)
11884                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11885                                         rcd->rcvhdrqtailaddr_dma);
11886                 rcd->seq_cnt = 1;
11887
11888                 /* reset the cached receive header queue head value */
11889                 rcd->head = 0;
11890
11891                 /*
11892                  * Zero the receive header queue so we don't get false
11893                  * positives when checking the sequence number.  The
11894                  * sequence numbers could land exactly on the same spot.
11895                  * E.g. an rcd restart before the receive header queue wrapped.
11896                  */
11897                 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11898
11899                 /* starting timeout */
11900                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11901
11902                 /* enable the context */
11903                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11904
11905                 /* clean the egr buffer size first */
11906                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11907                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11908                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11909                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11910
11911                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11912                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11913                 did_enable = 1;
11914
11915                 /* zero RcvEgrIndexHead */
11916                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11917
11918                 /* set eager count and base index */
11919                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11920                         & RCV_EGR_CTRL_EGR_CNT_MASK)
11921                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11922                         (((rcd->eager_base >> RCV_SHIFT)
11923                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11924                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11925                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11926
11927                 /*
11928                  * Set TID (expected) count and base index.
11929                  * rcd->expected_count is set to individual RcvArray entries,
11930                  * not pairs, and the CSR takes a pair-count in groups of
11931                  * four, so divide by 8.
11932                  */
11933                 reg = (((rcd->expected_count >> RCV_SHIFT)
11934                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11935                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11936                       (((rcd->expected_base >> RCV_SHIFT)
11937                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11938                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11939                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11940                 if (ctxt == HFI1_CTRL_CTXT)
11941                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11942         }
11943         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11944                 write_csr(dd, RCV_VL15, 0);
11945                 /*
11946                  * When a receive context is being disabled, turn on tail
11947                  * update with a dummy tail address and then disable the
11948                  * receive context.
11949                  */
11950                 if (dd->rcvhdrtail_dummy_dma) {
11951                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11952                                         dd->rcvhdrtail_dummy_dma);
11953                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11954                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11955                 }
11956
11957                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11958         }
11959         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
11960                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11961                               IS_RCVAVAIL_START + rcd->ctxt, true);
11962                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11963         }
11964         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
11965                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11966                               IS_RCVAVAIL_START + rcd->ctxt, false);
11967                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11968         }
11969         if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11970                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11971         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11972                 /* See comment on RcvCtxtCtrl.TailUpd above */
11973                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11974                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11975         }
11976         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11977                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11978         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11979                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11980         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11981                 /*
11982                  * In one-packet-per-eager mode, the size comes from
11983                  * the RcvArray entry.
11984                  */
11985                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11986                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11987         }
11988         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11989                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11990         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11991                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11992         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11993                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11994         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11995                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11996         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11997                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11998         if (op & HFI1_RCVCTRL_URGENT_ENB)
11999                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12000                               IS_RCVURGENT_START + rcd->ctxt, true);
12001         if (op & HFI1_RCVCTRL_URGENT_DIS)
12002                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12003                               IS_RCVURGENT_START + rcd->ctxt, false);
12004
12005         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12006         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12007
12008         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12009         if (did_enable &&
12010             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12011                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12012                 if (reg != 0) {
12013                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12014                                     ctxt, reg);
12015                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12016                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12017                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12018                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12019                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12020                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12021                                     ctxt, reg, reg == 0 ? "not" : "still");
12022                 }
12023         }
12024
12025         if (did_enable) {
12026                 /*
12027                  * The interrupt timeout and count must be set after
12028                  * the context is enabled to take effect.
12029                  */
12030                 /* set interrupt timeout */
12031                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12032                                 (u64)rcd->rcvavail_timeout <<
12033                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12034
12035                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12036                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12037                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12038         }
12039
12040         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12041                 /*
12042                  * If the context has been disabled and the Tail Update has
12043                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
12044                  * address so it doesn't contain an invalid address.
12045                  */
12046                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12047                                 dd->rcvhdrtail_dummy_dma);
12048 }
12049
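/*
 * Used by sysfs to read the device counters: return either the counter
 * name block (@namep) or fill in and return the counter value block
 * (@cntrp).  The return value is the size, in bytes, of the data.
 */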
12050 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12051 {
12052         int ret;
12053         u64 val = 0;
12054
12055         if (namep) {
12056                 ret = dd->cntrnameslen;
12057                 *namep = dd->cntrnames;
12058         } else {
12059                 const struct cntr_entry *entry;
12060                 int i, j;
12061
12062                 ret = (dd->ndevcntrs) * sizeof(u64);
12063
12064                 /* Get the start of the block of counters */
12065                 *cntrp = dd->cntrs;
12066
12067                 /*
12068                  * Now go and fill in each counter in the block.
12069                  */
12070                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12071                         entry = &dev_cntrs[i];
12072                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12073                         if (entry->flags & CNTR_DISABLED) {
12074                                 /* Nothing */
12075                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12076                         } else {
12077                                 if (entry->flags & CNTR_VL) {
12078                                         hfi1_cdbg(CNTR, "\tPer VL\n");
12079                                         for (j = 0; j < C_VL_COUNT; j++) {
12080                                                 val = entry->rw_cntr(entry,
12081                                                                   dd, j,
12082                                                                   CNTR_MODE_R,
12083                                                                   0);
12084                                                 hfi1_cdbg(
12085                                                    CNTR,
12086                                                    "\t\tRead 0x%llx for %d\n",
12087                                                    val, j);
12088                                                 dd->cntrs[entry->offset + j] =
12089                                                                             val;
12090                                         }
12091                                 } else if (entry->flags & CNTR_SDMA) {
12092                                         hfi1_cdbg(CNTR,
12093                                                   "\t Per SDMA Engine\n");
12094                                         for (j = 0; j < chip_sdma_engines(dd);
12095                                              j++) {
12096                                                 val =
12097                                                 entry->rw_cntr(entry, dd, j,
12098                                                                CNTR_MODE_R, 0);
12099                                                 hfi1_cdbg(CNTR,
12100                                                           "\t\tRead 0x%llx for %d\n",
12101                                                           val, j);
12102                                                 dd->cntrs[entry->offset + j] =
12103                                                                         val;
12104                                         }
12105                                 } else {
12106                                         val = entry->rw_cntr(entry, dd,
12107                                                         CNTR_INVALID_VL,
12108                                                         CNTR_MODE_R, 0);
12109                                         dd->cntrs[entry->offset] = val;
12110                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12111                                 }
12112                         }
12113                 }
12114         }
12115         return ret;
12116 }
12117
12118 /*
12119  * Used by sysfs to create files for hfi stats to read
12120  */
12121 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12122 {
12123         int ret;
12124         u64 val = 0;
12125
12126         if (namep) {
12127                 ret = ppd->dd->portcntrnameslen;
12128                 *namep = ppd->dd->portcntrnames;
12129         } else {
12130                 const struct cntr_entry *entry;
12131                 int i, j;
12132
12133                 ret = ppd->dd->nportcntrs * sizeof(u64);
12134                 *cntrp = ppd->cntrs;
12135
12136                 for (i = 0; i < PORT_CNTR_LAST; i++) {
12137                         entry = &port_cntrs[i];
12138                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12139                         if (entry->flags & CNTR_DISABLED) {
12140                                 /* Nothing */
12141                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12142                                 continue;
12143                         }
12144
12145                         if (entry->flags & CNTR_VL) {
12146                                 hfi1_cdbg(CNTR, "\tPer VL");
12147                                 for (j = 0; j < C_VL_COUNT; j++) {
12148                                         val = entry->rw_cntr(entry, ppd, j,
12149                                                                CNTR_MODE_R,
12150                                                                0);
12151                                         hfi1_cdbg(
12152                                            CNTR,
12153                                            "\t\tRead 0x%llx for %d",
12154                                            val, j);
12155                                         ppd->cntrs[entry->offset + j] = val;
12156                                 }
12157                         } else {
12158                                 val = entry->rw_cntr(entry, ppd,
12159                                                        CNTR_INVALID_VL,
12160                                                        CNTR_MODE_R,
12161                                                        0);
12162                                 ppd->cntrs[entry->offset] = val;
12163                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12164                         }
12165                 }
12166         }
12167         return ret;
12168 }
12169
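/*
 * Free all device and port counter storage and shut down the synthetic
 * counter timer and update workqueue.
 */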
12170 static void free_cntrs(struct hfi1_devdata *dd)
12171 {
12172         struct hfi1_pportdata *ppd;
12173         int i;
12174
12175         if (dd->synth_stats_timer.function)
12176                 del_timer_sync(&dd->synth_stats_timer);
12177         ppd = (struct hfi1_pportdata *)(dd + 1);
12178         for (i = 0; i < dd->num_pports; i++, ppd++) {
12179                 kfree(ppd->cntrs);
12180                 kfree(ppd->scntrs);
12181                 free_percpu(ppd->ibport_data.rvp.rc_acks);
12182                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12183                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12184                 ppd->cntrs = NULL;
12185                 ppd->scntrs = NULL;
12186                 ppd->ibport_data.rvp.rc_acks = NULL;
12187                 ppd->ibport_data.rvp.rc_qacks = NULL;
12188                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12189         }
12190         kfree(dd->portcntrnames);
12191         dd->portcntrnames = NULL;
12192         kfree(dd->cntrs);
12193         dd->cntrs = NULL;
12194         kfree(dd->scntrs);
12195         dd->scntrs = NULL;
12196         kfree(dd->cntrnames);
12197         dd->cntrnames = NULL;
12198         if (dd->update_cntr_wq) {
12199                 destroy_workqueue(dd->update_cntr_wq);
12200                 dd->update_cntr_wq = NULL;
12201         }
12202 }
12203
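/*
 * Read a device or port counter.  Synthetic counters are folded into
 * the 64-bit saved value at @psval, handling 32-bit hardware wrap and
 * saturation at CNTR_MAX.
 */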
12204 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12205                               u64 *psval, void *context, int vl)
12206 {
12207         u64 val;
12208         u64 sval = *psval;
12209
12210         if (entry->flags & CNTR_DISABLED) {
12211                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12212                 return 0;
12213         }
12214
12215         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12216
12217         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12218
12219         /* If it's a synthetic counter there is more work we need to do */
12220         if (entry->flags & CNTR_SYNTH) {
12221                 if (sval == CNTR_MAX) {
12222                         /* No need to read already saturated */
12223                         return CNTR_MAX;
12224                 }
12225
12226                 if (entry->flags & CNTR_32BIT) {
12227                         /* 32bit counters can wrap multiple times */
12228                         u64 upper = sval >> 32;
12229                         u64 lower = (sval << 32) >> 32;
12230
12231                         if (lower > val) { /* hw wrapped */
12232                                 if (upper == CNTR_32BIT_MAX)
12233                                         val = CNTR_MAX;
12234                                 else
12235                                         upper++;
12236                         }
12237
12238                         if (val != CNTR_MAX)
12239                                 val = (upper << 32) | val;
12240
12241                 } else {
12242                         /* If we rolled we are saturated */
12243                         if ((val < sval) || (val > CNTR_MAX))
12244                                 val = CNTR_MAX;
12245                 }
12246         }
12247
12248         *psval = val;
12249
12250         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12251
12252         return val;
12253 }
12254
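/*
 * Write a device or port counter.  For synthetic 32-bit counters only
 * the low 32 bits reach the hardware; the saved value at @psval keeps
 * the full 64-bit quantity.
 */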
12255 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12256                                struct cntr_entry *entry,
12257                                u64 *psval, void *context, int vl, u64 data)
12258 {
12259         u64 val;
12260
12261         if (entry->flags & CNTR_DISABLED) {
12262                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12263                 return 0;
12264         }
12265
12266         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12267
12268         if (entry->flags & CNTR_SYNTH) {
12269                 *psval = data;
12270                 if (entry->flags & CNTR_32BIT) {
12271                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12272                                              (data << 32) >> 32);
12273                         val = data; /* return the full 64bit value */
12274                 } else {
12275                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12276                                              data);
12277                 }
12278         } else {
12279                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12280         }
12281
12282         *psval = val;
12283
12284         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12285
12286         return val;
12287 }
12288
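/*
 * The following are thin wrappers that locate the counter entry and its
 * per-VL saved value, then defer to read_dev_port_cntr() or
 * write_dev_port_cntr().
 */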
12289 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12290 {
12291         struct cntr_entry *entry;
12292         u64 *sval;
12293
12294         entry = &dev_cntrs[index];
12295         sval = dd->scntrs + entry->offset;
12296
12297         if (vl != CNTR_INVALID_VL)
12298                 sval += vl;
12299
12300         return read_dev_port_cntr(dd, entry, sval, dd, vl);
12301 }
12302
12303 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12304 {
12305         struct cntr_entry *entry;
12306         u64 *sval;
12307
12308         entry = &dev_cntrs[index];
12309         sval = dd->scntrs + entry->offset;
12310
12311         if (vl != CNTR_INVALID_VL)
12312                 sval += vl;
12313
12314         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12315 }
12316
12317 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12318 {
12319         struct cntr_entry *entry;
12320         u64 *sval;
12321
12322         entry = &port_cntrs[index];
12323         sval = ppd->scntrs + entry->offset;
12324
12325         if (vl != CNTR_INVALID_VL)
12326                 sval += vl;
12327
12328         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12329             (index <= C_RCV_HDR_OVF_LAST)) {
12330                 /* We do not want to bother for disabled contexts */
12331                 return 0;
12332         }
12333
12334         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12335 }
12336
12337 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12338 {
12339         struct cntr_entry *entry;
12340         u64 *sval;
12341
12342         entry = &port_cntrs[index];
12343         sval = ppd->scntrs + entry->offset;
12344
12345         if (vl != CNTR_INVALID_VL)
12346                 sval += vl;
12347
12348         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12349             (index <= C_RCV_HDR_OVF_LAST)) {
12350                 /* We do not want to bother for disabled contexts */
12351                 return 0;
12352         }
12353
12354         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12355 }
12356
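/*
 * Work function behind the synthetic counter timer: decide whether the
 * 32-bit hardware counters may have rolled over and, if so, read every
 * device and port counter so the saved software copies stay current.
 */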
12357 static void do_update_synth_timer(struct work_struct *work)
12358 {
12359         u64 cur_tx;
12360         u64 cur_rx;
12361         u64 total_flits;
12362         u8 update = 0;
12363         int i, j, vl;
12364         struct hfi1_pportdata *ppd;
12365         struct cntr_entry *entry;
12366         struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12367                                                update_cntr_work);
12368
12369         /*
12370          * Rather than keep beating on the CSRs, pick a minimal set that we can
12371          * check to watch for potential rollover. We can do this by looking at
12372          * the number of flits sent/received. If the total flits exceed 32 bits
12373          * then we have to iterate over all the counters and update.
12374          */
12375         entry = &dev_cntrs[C_DC_RCV_FLITS];
12376         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12377
12378         entry = &dev_cntrs[C_DC_XMIT_FLITS];
12379         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12380
12381         hfi1_cdbg(
12382             CNTR,
12383             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12384             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12385
12386         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12387                 /*
12388                  * May not be strictly necessary to update but it won't hurt and
12389                  * simplifies the logic here.
12390                  */
12391                 update = 1;
12392                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12393                           dd->unit);
12394         } else {
12395                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12396                 hfi1_cdbg(CNTR,
12397                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12398                           total_flits, (u64)CNTR_32BIT_MAX);
12399                 if (total_flits >= CNTR_32BIT_MAX) {
12400                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12401                                   dd->unit);
12402                         update = 1;
12403                 }
12404         }
12405
12406         if (update) {
12407                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12408                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12409                         entry = &dev_cntrs[i];
12410                         if (entry->flags & CNTR_VL) {
12411                                 for (vl = 0; vl < C_VL_COUNT; vl++)
12412                                         read_dev_cntr(dd, i, vl);
12413                         } else {
12414                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12415                         }
12416                 }
12417                 ppd = (struct hfi1_pportdata *)(dd + 1);
12418                 for (i = 0; i < dd->num_pports; i++, ppd++) {
12419                         for (j = 0; j < PORT_CNTR_LAST; j++) {
12420                                 entry = &port_cntrs[j];
12421                                 if (entry->flags & CNTR_VL) {
12422                                         for (vl = 0; vl < C_VL_COUNT; vl++)
12423                                                 read_port_cntr(ppd, j, vl);
12424                                 } else {
12425                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
12426                                 }
12427                         }
12428                 }
12429
12430                 /*
12431                  * We want the value in the register. The goal is to keep track
12432                  * of the number of "ticks", not the counter value. In other
12433                  * words, if the register rolls we want to notice it and go ahead
12434                  * and force an update.
12435                  */
12436                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12437                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12438                                                 CNTR_MODE_R, 0);
12439
12440                 entry = &dev_cntrs[C_DC_RCV_FLITS];
12441                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12442                                                 CNTR_MODE_R, 0);
12443
12444                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12445                           dd->unit, dd->last_tx, dd->last_rx);
12446
12447         } else {
12448                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12449         }
12450 }
12451
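/* Timer callback: queue the counter update work and re-arm the timer. */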
12452 static void update_synth_timer(struct timer_list *t)
12453 {
12454         struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12455
12456         queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12457         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12458 }
12459
12460 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
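/*
 * Size, name, and allocate storage for all device and port counters,
 * then start the synthetic counter workqueue and stats timer.
 */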
12461 static int init_cntrs(struct hfi1_devdata *dd)
12462 {
12463         int i, rcv_ctxts, j;
12464         size_t sz;
12465         char *p;
12466         char name[C_MAX_NAME];
12467         struct hfi1_pportdata *ppd;
12468         const char *bit_type_32 = ",32";
12469         const int bit_type_32_sz = strlen(bit_type_32);
12470         u32 sdma_engines = chip_sdma_engines(dd);
12471
12472         /* set up the stats timer; the add_timer is done at the end */
12473         timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12474
12475         /***********************/
12476         /* per device counters */
12477         /***********************/
12478
12479         /* size names and determine how many we have */
12480         dd->ndevcntrs = 0;
12481         sz = 0;
12482
12483         for (i = 0; i < DEV_CNTR_LAST; i++) {
12484                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12485                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12486                         continue;
12487                 }
12488
12489                 if (dev_cntrs[i].flags & CNTR_VL) {
12490                         dev_cntrs[i].offset = dd->ndevcntrs;
12491                         for (j = 0; j < C_VL_COUNT; j++) {
12492                                 snprintf(name, C_MAX_NAME, "%s%d",
12493                                          dev_cntrs[i].name, vl_from_idx(j));
12494                                 sz += strlen(name);
12495                                 /* Add ",32" for 32-bit counters */
12496                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12497                                         sz += bit_type_32_sz;
12498                                 sz++;
12499                                 dd->ndevcntrs++;
12500                         }
12501                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12502                         dev_cntrs[i].offset = dd->ndevcntrs;
12503                         for (j = 0; j < sdma_engines; j++) {
12504                                 snprintf(name, C_MAX_NAME, "%s%d",
12505                                          dev_cntrs[i].name, j);
12506                                 sz += strlen(name);
12507                                 /* Add ",32" for 32-bit counters */
12508                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12509                                         sz += bit_type_32_sz;
12510                                 sz++;
12511                                 dd->ndevcntrs++;
12512                         }
12513                 } else {
12514                         /* +1 for newline. */
12515                         sz += strlen(dev_cntrs[i].name) + 1;
12516                         /* Add ",32" for 32-bit counters */
12517                         if (dev_cntrs[i].flags & CNTR_32BIT)
12518                                 sz += bit_type_32_sz;
12519                         dev_cntrs[i].offset = dd->ndevcntrs;
12520                         dd->ndevcntrs++;
12521                 }
12522         }
12523
12524         /* allocate space for the counter values */
12525         dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12526                             GFP_KERNEL);
12527         if (!dd->cntrs)
12528                 goto bail;
12529
12530         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12531         if (!dd->scntrs)
12532                 goto bail;
12533
12534         /* allocate space for the counter names */
12535         dd->cntrnameslen = sz;
12536         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12537         if (!dd->cntrnames)
12538                 goto bail;
12539
12540         /* fill in the names */
12541         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12542                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12543                         /* Nothing */
12544                 } else if (dev_cntrs[i].flags & CNTR_VL) {
12545                         for (j = 0; j < C_VL_COUNT; j++) {
12546                                 snprintf(name, C_MAX_NAME, "%s%d",
12547                                          dev_cntrs[i].name,
12548                                          vl_from_idx(j));
12549                                 memcpy(p, name, strlen(name));
12550                                 p += strlen(name);
12551
12552                                 /* Counter is 32 bits */
12553                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12554                                         memcpy(p, bit_type_32, bit_type_32_sz);
12555                                         p += bit_type_32_sz;
12556                                 }
12557
12558                                 *p++ = '\n';
12559                         }
12560                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12561                         for (j = 0; j < sdma_engines; j++) {
12562                                 snprintf(name, C_MAX_NAME, "%s%d",
12563                                          dev_cntrs[i].name, j);
12564                                 memcpy(p, name, strlen(name));
12565                                 p += strlen(name);
12566
12567                                 /* Counter is 32 bits */
12568                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12569                                         memcpy(p, bit_type_32, bit_type_32_sz);
12570                                         p += bit_type_32_sz;
12571                                 }
12572
12573                                 *p++ = '\n';
12574                         }
12575                 } else {
12576                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12577                         p += strlen(dev_cntrs[i].name);
12578
12579                         /* Counter is 32 bits */
12580                         if (dev_cntrs[i].flags & CNTR_32BIT) {
12581                                 memcpy(p, bit_type_32, bit_type_32_sz);
12582                                 p += bit_type_32_sz;
12583                         }
12584
12585                         *p++ = '\n';
12586                 }
12587         }
12588
12589         /*********************/
12590         /* per port counters */
12591         /*********************/
12592
12593         /*
12594          * Go through the counters for the overflows and disable the ones we
12595          * don't need. This varies based on platform so we need to do it
12596          * dynamically here.
12597          */
12598         rcv_ctxts = dd->num_rcv_contexts;
12599         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12600              i <= C_RCV_HDR_OVF_LAST; i++) {
12601                 port_cntrs[i].flags |= CNTR_DISABLED;
12602         }
12603
12604         /* size port counter names and determine how many we have */
12605         sz = 0;
12606         dd->nportcntrs = 0;
12607         for (i = 0; i < PORT_CNTR_LAST; i++) {
12608                 if (port_cntrs[i].flags & CNTR_DISABLED) {
12609                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12610                         continue;
12611                 }
12612
12613                 if (port_cntrs[i].flags & CNTR_VL) {
12614                         port_cntrs[i].offset = dd->nportcntrs;
12615                         for (j = 0; j < C_VL_COUNT; j++) {
12616                                 snprintf(name, C_MAX_NAME, "%s%d",
12617                                          port_cntrs[i].name, vl_from_idx(j));
12618                                 sz += strlen(name);
12619                                 /* Add ",32" for 32-bit counters */
12620                                 if (port_cntrs[i].flags & CNTR_32BIT)
12621                                         sz += bit_type_32_sz;
12622                                 sz++;
12623                                 dd->nportcntrs++;
12624                         }
12625                 } else {
12626                         /* +1 for newline */
12627                         sz += strlen(port_cntrs[i].name) + 1;
12628                         /* Add ",32" for 32-bit counters */
12629                         if (port_cntrs[i].flags & CNTR_32BIT)
12630                                 sz += bit_type_32_sz;
12631                         port_cntrs[i].offset = dd->nportcntrs;
12632                         dd->nportcntrs++;
12633                 }
12634         }
12635
12636         /* allocate space for the counter names */
12637         dd->portcntrnameslen = sz;
12638         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12639         if (!dd->portcntrnames)
12640                 goto bail;
12641
12642         /* fill in port cntr names */
12643         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12644                 if (port_cntrs[i].flags & CNTR_DISABLED)
12645                         continue;
12646
12647                 if (port_cntrs[i].flags & CNTR_VL) {
12648                         for (j = 0; j < C_VL_COUNT; j++) {
12649                                 snprintf(name, C_MAX_NAME, "%s%d",
12650                                          port_cntrs[i].name, vl_from_idx(j));
12651                                 memcpy(p, name, strlen(name));
12652                                 p += strlen(name);
12653
12654                                 /* Counter is 32 bits */
12655                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12656                                         memcpy(p, bit_type_32, bit_type_32_sz);
12657                                         p += bit_type_32_sz;
12658                                 }
12659
12660                                 *p++ = '\n';
12661                         }
12662                 } else {
12663                         memcpy(p, port_cntrs[i].name,
12664                                strlen(port_cntrs[i].name));
12665                         p += strlen(port_cntrs[i].name);
12666
12667                         /* Counter is 32 bits */
12668                         if (port_cntrs[i].flags & CNTR_32BIT) {
12669                                 memcpy(p, bit_type_32, bit_type_32_sz);
12670                                 p += bit_type_32_sz;
12671                         }
12672
12673                         *p++ = '\n';
12674                 }
12675         }
12676
12677         /* allocate per port storage for counter values */
12678         ppd = (struct hfi1_pportdata *)(dd + 1);
12679         for (i = 0; i < dd->num_pports; i++, ppd++) {
12680                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12681                 if (!ppd->cntrs)
12682                         goto bail;
12683
12684                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12685                 if (!ppd->scntrs)
12686                         goto bail;
12687         }
12688
12689         /* CPU counters need to be allocated and zeroed */
12690         if (init_cpu_counters(dd))
12691                 goto bail;
12692
12693         dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12694                                                      WQ_MEM_RECLAIM, dd->unit);
12695         if (!dd->update_cntr_wq)
12696                 goto bail;
12697
12698         INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12699
12700         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12701         return 0;
12702 bail:
12703         free_cntrs(dd);
12704         return -ENOMEM;
12705 }
12706
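/* Map a chip logical link state to the corresponding IB port logical state. */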
12707 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12708 {
12709         switch (chip_lstate) {
12710         default:
12711                 dd_dev_err(dd,
12712                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12713                            chip_lstate);
12714                 /* fall through */
12715         case LSTATE_DOWN:
12716                 return IB_PORT_DOWN;
12717         case LSTATE_INIT:
12718                 return IB_PORT_INIT;
12719         case LSTATE_ARMED:
12720                 return IB_PORT_ARMED;
12721         case LSTATE_ACTIVE:
12722                 return IB_PORT_ACTIVE;
12723         }
12724 }
12725
12726 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12727 {
12728         /* look at the HFI meta-states only */
12729         switch (chip_pstate & 0xf0) {
12730         default:
12731                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12732                            chip_pstate);
12733                 /* fall through */
12734         case PLS_DISABLED:
12735                 return IB_PORTPHYSSTATE_DISABLED;
12736         case PLS_OFFLINE:
12737                 return OPA_PORTPHYSSTATE_OFFLINE;
12738         case PLS_POLLING:
12739                 return IB_PORTPHYSSTATE_POLLING;
12740         case PLS_CONFIGPHY:
12741                 return IB_PORTPHYSSTATE_TRAINING;
12742         case PLS_LINKUP:
12743                 return IB_PORTPHYSSTATE_LINKUP;
12744         case PLS_PHYTEST:
12745                 return IB_PORTPHYSSTATE_PHY_TEST;
12746         }
12747 }
12748
12749 /* return the OPA port logical state name */
12750 const char *opa_lstate_name(u32 lstate)
12751 {
12752         static const char * const port_logical_names[] = {
12753                 "PORT_NOP",
12754                 "PORT_DOWN",
12755                 "PORT_INIT",
12756                 "PORT_ARMED",
12757                 "PORT_ACTIVE",
12758                 "PORT_ACTIVE_DEFER",
12759         };
12760         if (lstate < ARRAY_SIZE(port_logical_names))
12761                 return port_logical_names[lstate];
12762         return "unknown";
12763 }
12764
12765 /* return the OPA port physical state name */
12766 const char *opa_pstate_name(u32 pstate)
12767 {
12768         static const char * const port_physical_names[] = {
12769                 "PHYS_NOP",
12770                 "reserved1",
12771                 "PHYS_POLL",
12772                 "PHYS_DISABLED",
12773                 "PHYS_TRAINING",
12774                 "PHYS_LINKUP",
12775                 "PHYS_LINK_ERR_RECOVER",
12776                 "PHYS_PHY_TEST",
12777                 "reserved8",
12778                 "PHYS_OFFLINE",
12779                 "PHYS_GANGED",
12780                 "PHYS_TEST",
12781         };
12782         if (pstate < ARRAY_SIZE(port_physical_names))
12783                 return port_physical_names[pstate];
12784         return "unknown";
12785 }
12786
12787 /**
12788  * update_statusp - Update userspace status flag
12789  * @ppd: Port data structure
12790  * @state: port state information
12791  *
12792  * Actual port status is determined by the host_link_state value
12793  * in the ppd.
12794  *
12795  * host_link_state MUST be updated before updating the user space
12796  * statusp.
12797  */
12798 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12799 {
12800         /*
12801          * Set port status flags in the page mapped into userspace
12802          * memory. Do it here to ensure a reliable state - this is
12803          * the only function called by all state handling code.
12804          * Always set the flags because the cached value might
12805          * have been changed explicitly outside of this
12806          * function.
12807          */
12808         if (ppd->statusp) {
12809                 switch (state) {
12810                 case IB_PORT_DOWN:
12811                 case IB_PORT_INIT:
12812                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12813                                            HFI1_STATUS_IB_READY);
12814                         break;
12815                 case IB_PORT_ARMED:
12816                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12817                         break;
12818                 case IB_PORT_ACTIVE:
12819                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12820                         break;
12821                 }
12822         }
12823         dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12824                     opa_lstate_name(state), state);
12825 }
12826
12827 /**
12828  * wait_logical_linkstate - wait for an IB link state change to occur
12829  * @ppd: port device
12830  * @state: the state to wait for
12831  * @msecs: the number of milliseconds to wait
12832  *
12833  * Wait up to msecs milliseconds for an IB link state change to occur.
12834  * For now, take the easy polling route.
12835  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12836  */
12837 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12838                                   int msecs)
12839 {
12840         unsigned long timeout;
12841         u32 new_state;
12842
12843         timeout = jiffies + msecs_to_jiffies(msecs);
12844         while (1) {
12845                 new_state = chip_to_opa_lstate(ppd->dd,
12846                                                read_logical_state(ppd->dd));
12847                 if (new_state == state)
12848                         break;
12849                 if (time_after(jiffies, timeout)) {
12850                         dd_dev_err(ppd->dd,
12851                                    "timeout waiting for link state 0x%x\n",
12852                                    state);
12853                         return -ETIMEDOUT;
12854                 }
12855                 msleep(20);
12856         }
12857
12858         return 0;
12859 }
12860
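/* Log a physical link state transition at informational level. */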
12861 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12862 {
12863         u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12864
12865         dd_dev_info(ppd->dd,
12866                     "physical state changed to %s (0x%x), phy 0x%x\n",
12867                     opa_pstate_name(ib_pstate), ib_pstate, state);
12868 }
12869
12870 /*
12871  * Read the physical hardware link state and check if it matches host
12872  * driver's anticipated state.
12873  */
12874 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12875 {
12876         u32 read_state = read_physical_state(ppd->dd);
12877
12878         if (read_state == state) {
12879                 log_state_transition(ppd, state);
12880         } else {
12881                 dd_dev_err(ppd->dd,
12882                            "anticipated phy link state 0x%x, read 0x%x\n",
12883                            state, read_state);
12884         }
12885 }
12886
12887 /*
12888  * wait_physical_linkstate - wait for a physical link state change to occur
12889  * @ppd: port device
12890  * @state: the state to wait for
12891  * @msecs: the number of milliseconds to wait
12892  *
12893  * Wait up to msecs milliseconds for a physical link state change to occur.
12894  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12895  */
12896 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12897                                    int msecs)
12898 {
12899         u32 read_state;
12900         unsigned long timeout;
12901
12902         timeout = jiffies + msecs_to_jiffies(msecs);
12903         while (1) {
12904                 read_state = read_physical_state(ppd->dd);
12905                 if (read_state == state)
12906                         break;
12907                 if (time_after(jiffies, timeout)) {
12908                         dd_dev_err(ppd->dd,
12909                                    "timeout waiting for phy link state 0x%x\n",
12910                                    state);
12911                         return -ETIMEDOUT;
12912                 }
12913                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12914         }
12915
12916         log_state_transition(ppd, state);
12917         return 0;
12918 }
12919
12920 /*
12921  * wait_phys_link_offline_substates - wait for any offline substate
12922  * @ppd: port device
12923  * @msecs: the number of milliseconds to wait
12924  *
12925  * Wait up to msecs milliseconds for any offline physical link
12926  * state change to occur.
12927  * Returns the read physical state on success, otherwise -ETIMEDOUT.
12928  */
12929 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12930                                             int msecs)
12931 {
12932         u32 read_state;
12933         unsigned long timeout;
12934
12935         timeout = jiffies + msecs_to_jiffies(msecs);
12936         while (1) {
12937                 read_state = read_physical_state(ppd->dd);
12938                 if ((read_state & 0xF0) == PLS_OFFLINE)
12939                         break;
12940                 if (time_after(jiffies, timeout)) {
12941                         dd_dev_err(ppd->dd,
12942                                    "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12943                                    read_state, msecs);
12944                         return -ETIMEDOUT;
12945                 }
12946                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12947         }
12948
12949         log_state_transition(ppd, read_state);
12950         return read_state;
12951 }
12952
12953 /*
12954  * wait_phys_link_out_of_offline - wait for any out of offline state
12955  * @ppd: port device
12956  * @msecs: the number of milliseconds to wait
12957  *
12958  * Wait up to msecs milliseconds for the physical link state to change
12959  * out of any offline state.
12960  * Returns the read physical state on success, otherwise -ETIMEDOUT.
12961  */
12962 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12963                                          int msecs)
12964 {
12965         u32 read_state;
12966         unsigned long timeout;
12967
12968         timeout = jiffies + msecs_to_jiffies(msecs);
12969         while (1) {
12970                 read_state = read_physical_state(ppd->dd);
12971                 if ((read_state & 0xF0) != PLS_OFFLINE)
12972                         break;
12973                 if (time_after(jiffies, timeout)) {
12974                         dd_dev_err(ppd->dd,
12975                                    "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12976                                    read_state, msecs);
12977                         return -ETIMEDOUT;
12978                 }
12979                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12980         }
12981
12982         log_state_transition(ppd, read_state);
12983         return read_state;
12984 }
12985
12986 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12987 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12988
12989 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12990 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12991
12992 void hfi1_init_ctxt(struct send_context *sc)
12993 {
12994         if (sc) {
12995                 struct hfi1_devdata *dd = sc->dd;
12996                 u64 reg;
12997                 u8 set = (sc->type == SC_USER ?
12998                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12999                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13000                 reg = read_kctxt_csr(dd, sc->hw_context,
13001                                      SEND_CTXT_CHECK_ENABLE);
13002                 if (set)
13003                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13004                 else
13005                         SET_STATIC_RATE_CONTROL_SMASK(reg);
13006                 write_kctxt_csr(dd, sc->hw_context,
13007                                 SEND_CTXT_CHECK_ENABLE, reg);
13008         }
13009 }
13010
13011 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13012 {
13013         int ret = 0;
13014         u64 reg;
13015
13016         if (dd->icode != ICODE_RTL_SILICON) {
13017                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13018                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13019                                     __func__);
13020                 return -EINVAL;
13021         }
13022         reg = read_csr(dd, ASIC_STS_THERM);
13023         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13024                       ASIC_STS_THERM_CURR_TEMP_MASK);
13025         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13026                         ASIC_STS_THERM_LO_TEMP_MASK);
13027         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13028                         ASIC_STS_THERM_HI_TEMP_MASK);
13029         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13030                           ASIC_STS_THERM_CRIT_TEMP_MASK);
13031         /* triggers is a 3-bit value - 1 bit per trigger. */
13032         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13033
13034         return ret;
13035 }
13036
13037 /* ========================================================================= */
13038
13039 /**
13040  * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13041  * @dd: valid devdata
13042  * @src: IRQ source to determine register index from
13043  * @bits: the bits to set or clear
13044  * @set: true == set the bits, false == clear the bits
13045  *
13046  */
13047 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13048                            bool set)
13049 {
13050         u64 reg;
13051         u16 idx = src / BITS_PER_REGISTER;
13052
13053         spin_lock(&dd->irq_src_lock);
13054         reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13055         if (set)
13056                 reg |= bits;
13057         else
13058                 reg &= ~bits;
13059         write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13060         spin_unlock(&dd->irq_src_lock);
13061 }
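/*
 * Worked example for the indexing above (illustrative only): with 64-bit
 * interrupt mask CSRs (BITS_PER_REGISTER == 64), IRQ source 70 gives
 * idx = 70 / 64 = 1, i.e. the CSR at CCE_INT_MASK + 8, and the caller is
 * expected to pass a bits value with BIT_ULL(70 % 64) = BIT_ULL(6) set
 * for that source.
 */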
13062
13063 /**
13064  * set_intr_bits() - Enable/disable a range of (one or more) IRQ sources
13065  * @dd: valid devdata
13066  * @first: first IRQ source to set/clear
13067  * @last: last IRQ source (inclusive) to set/clear
13068  * @set: true == set the bits, false == clear the bits
13069  *
13070  * If first == last, set the exact source.
13071  */
13072 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13073 {
13074         u64 bits = 0;
13075         u64 bit;
13076         u16 src;
13077
13078         if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13079                 return -EINVAL;
13080
13081         if (last < first)
13082                 return -ERANGE;
13083
13084         for (src = first; src <= last; src++) {
13085                 bit = src % BITS_PER_REGISTER;
13086                 /* wrapped to next register? */
13087                 if (!bit && bits) {
13088                         read_mod_write(dd, src - 1, bits, set);
13089                         bits = 0;
13090                 }
13091                 bits |= BIT_ULL(bit);
13092         }
13093         read_mod_write(dd, last, bits, set);
13094
13095         return 0;
13096 }
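/*
 * Boundary-crossing example (illustrative only): with first = 60 and
 * last = 70 on 64-bit mask CSRs, sources 60-63 accumulate in bits; when
 * src reaches 64 (bit == 0 and bits != 0) the partial mask is flushed
 * with read_mod_write(dd, 63, bits, set) into register 0, and sources
 * 64-70 are flushed into register 1 by the final
 * read_mod_write(dd, last, bits, set) call.
 */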
13097
13098 /*
13099  * Clear all interrupt sources on the chip.
13100  */
13101 void clear_all_interrupts(struct hfi1_devdata *dd)
13102 {
13103         int i;
13104
13105         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13106                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13107
13108         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13109         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13110         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13111         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13112         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13113         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13114         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13115         for (i = 0; i < chip_send_contexts(dd); i++)
13116                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13117         for (i = 0; i < chip_sdma_engines(dd); i++)
13118                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13119
13120         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13121         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13122         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13123 }
13124
13125 /*
13126  * Remap the interrupt source from the general handler to the given MSI-X
13127  * interrupt.
13128  */
13129 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13130 {
13131         u64 reg;
13132         int m, n;
13133
13134         /* clear from the handled mask of the general interrupt */
13135         m = isrc / 64;
13136         n = isrc % 64;
13137         if (likely(m < CCE_NUM_INT_CSRS)) {
13138                 dd->gi_mask[m] &= ~((u64)1 << n);
13139         } else {
13140                 dd_dev_err(dd, "remap interrupt err\n");
13141                 return;
13142         }
13143
13144         /* direct the chip source to the given MSI-X interrupt */
13145         m = isrc / 8;
13146         n = isrc % 8;
13147         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13148         reg &= ~((u64)0xff << (8 * n));
13149         reg |= ((u64)msix_intr & 0xff) << (8 * n);
13150         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13151 }
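/*
 * Mapping-math sketch (illustrative only): each 64-bit CCE_INT_MAP CSR
 * holds eight one-byte MSI-X vector entries, so remapping chip source
 * 137, for example, clears bit 137 % 64 = 9 in gi_mask[137 / 64 = 2] and
 * then writes the vector number into byte 137 % 8 = 1 of the CSR at
 * CCE_INT_MAP + 8 * (137 / 8) = CCE_INT_MAP + 8 * 17.
 */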
13152
13153 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13154 {
13155         /*
13156          * SDMA engine interrupt sources are grouped by type, rather
13157          * than by engine.  Per-engine interrupts are as follows:
13158          *      SDMA
13159          *      SDMAProgress
13160          *      SDMAIdle
13161          */
13162         remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13163         remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13164         remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13165 }
13166
13167 /*
13168  * Set the general handler to accept all interrupts, remap all
13169  * chip interrupts back to MSI-X 0.
13170  */
13171 void reset_interrupts(struct hfi1_devdata *dd)
13172 {
13173         int i;
13174
13175         /* all interrupts handled by the general handler */
13176         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13177                 dd->gi_mask[i] = ~(u64)0;
13178
13179         /* all chip interrupts map to MSI-X 0 */
13180         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13181                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13182 }
13183
13184 /**
13185  * set_up_interrupts() - Initialize the IRQ resources and state
13186  * @dd: valid devdata
13187  *
13188  */
13189 static int set_up_interrupts(struct hfi1_devdata *dd)
13190 {
13191         int ret;
13192
13193         /* mask all interrupts */
13194         set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13195
13196         /* clear all pending interrupts */
13197         clear_all_interrupts(dd);
13198
13199         /* reset general handler mask, chip MSI-X mappings */
13200         reset_interrupts(dd);
13201
13202         /* ask for MSI-X interrupts */
13203         ret = msix_initialize(dd);
13204         if (ret)
13205                 return ret;
13206
13207         ret = msix_request_irqs(dd);
13208         if (ret)
13209                 msix_clean_up_interrupts(dd);
13210
13211         return ret;
13212 }
13213
13214 /*
13215  * Set up context values in dd.  Sets:
13216  *
13217  *      num_rcv_contexts - number of contexts being used
13218  *      n_krcv_queues - number of kernel contexts
13219  *      first_dyn_alloc_ctxt - first dynamically allocated context
13220  *                             in array of contexts
13221  *      freectxts  - number of free user contexts
13222  *      num_send_contexts - number of PIO send contexts being used
13223  *      num_vnic_contexts - number of contexts reserved for VNIC
13224  */
13225 static int set_up_context_variables(struct hfi1_devdata *dd)
13226 {
13227         unsigned long num_kernel_contexts;
13228         u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13229         int total_contexts;
13230         int ret;
13231         unsigned ngroups;
13232         int qos_rmt_count;
13233         int user_rmt_reduced;
13234         u32 n_usr_ctxts;
13235         u32 send_contexts = chip_send_contexts(dd);
13236         u32 rcv_contexts = chip_rcv_contexts(dd);
13237
13238         /*
13239          * Kernel receive contexts:
13240          * - Context 0 - control context (VL15/multicast/error)
13241          * - Context 1 - first kernel context
13242          * - Context 2 - second kernel context
13243          * ...
13244          */
13245         if (n_krcvqs)
13246                 /*
13247                  * n_krcvqs is the sum of module parameter kernel receive
13248                  * contexts, krcvqs[].  It does not include the control
13249                  * context, so add that.
13250                  */
13251                 num_kernel_contexts = n_krcvqs + 1;
13252         else
13253                 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13254         /*
13255          * Every kernel receive context needs an ACK send context.
13256          * one send context is allocated for each VL{0-7} and VL15
13257          */
13258         if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13259                 dd_dev_err(dd,
13260                            "Reducing # kernel rcv contexts to: %d, from %lu\n",
13261                            send_contexts - num_vls - 1,
13262                            num_kernel_contexts);
13263                 num_kernel_contexts = send_contexts - num_vls - 1;
13264         }
13265
13266         /* Accommodate VNIC contexts if possible */
13267         if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13268                 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13269                 num_vnic_contexts = 0;
13270         }
13271         total_contexts = num_kernel_contexts + num_vnic_contexts;
13272
13273         /*
13274          * User contexts:
13275          *      - default to 1 user context per real (non-HT) CPU core if
13276          *        num_user_contexts is negative
13277          */
13278         if (num_user_contexts < 0)
13279                 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13280         else
13281                 n_usr_ctxts = num_user_contexts;
13282         /*
13283          * Adjust the counts given a global max.
13284          */
13285         if (total_contexts + n_usr_ctxts > rcv_contexts) {
13286                 dd_dev_err(dd,
13287                            "Reducing # user receive contexts to: %d, from %u\n",
13288                            rcv_contexts - total_contexts,
13289                            n_usr_ctxts);
13290                 /* recalculate */
13291                 n_usr_ctxts = rcv_contexts - total_contexts;
13292         }
13293
13294         /* each user context requires an entry in the RMT */
13295         qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13296         if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13297                 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13298                 dd_dev_err(dd,
13299                            "RMT size is reducing the number of user receive contexts from %u to %d\n",
13300                            n_usr_ctxts,
13301                            user_rmt_reduced);
13302                 /* recalculate */
13303                 n_usr_ctxts = user_rmt_reduced;
13304         }
13305
13306         total_contexts += n_usr_ctxts;
13307
13308         /* the first N are kernel contexts, the rest are user/vnic contexts */
13309         dd->num_rcv_contexts = total_contexts;
13310         dd->n_krcv_queues = num_kernel_contexts;
13311         dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13312         dd->num_vnic_contexts = num_vnic_contexts;
13313         dd->num_user_contexts = n_usr_ctxts;
13314         dd->freectxts = n_usr_ctxts;
13315         dd_dev_info(dd,
13316                     "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13317                     rcv_contexts,
13318                     (int)dd->num_rcv_contexts,
13319                     (int)dd->n_krcv_queues,
13320                     dd->num_vnic_contexts,
13321                     dd->num_user_contexts);
13322
13323         /*
13324          * Receive array allocation:
13325          *   All RcvArray entries are divided into groups of 8. This
13326          *   is required by the hardware and will speed up writes to
13327          *   consecutive entries by using write-combining of the entire
13328          *   cacheline.
13329          *
13330          *   The groups are evenly divided among all contexts;
13331          *   any leftover groups are given to the first N user
13332          *   contexts.
13333          */
13334         dd->rcv_entries.group_size = RCV_INCREMENT;
13335         ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13336         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13337         dd->rcv_entries.nctxt_extra = ngroups -
13338                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13339         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13340                     dd->rcv_entries.ngroups,
13341                     dd->rcv_entries.nctxt_extra);
13342         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13343             MAX_EAGER_ENTRIES * 2) {
13344                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13345                         dd->rcv_entries.group_size;
13346                 dd_dev_info(dd,
13347                             "RcvArray group count too high, change to %u\n",
13348                             dd->rcv_entries.ngroups);
13349                 dd->rcv_entries.nctxt_extra = 0;
13350         }
13351         /*
13352          * PIO send contexts
13353          */
13354         ret = init_sc_pools_and_sizes(dd);
13355         if (ret >= 0) { /* success */
13356                 dd->num_send_contexts = ret;
13357                 dd_dev_info(
13358                         dd,
13359                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13360                         send_contexts,
13361                         dd->num_send_contexts,
13362                         dd->sc_sizes[SC_KERNEL].count,
13363                         dd->sc_sizes[SC_ACK].count,
13364                         dd->sc_sizes[SC_USER].count,
13365                         dd->sc_sizes[SC_VL15].count);
13366                 ret = 0;        /* success */
13367         }
13368
13369         return ret;
13370 }
13371
13372 /*
13373  * Set the device/port partition key table. The MAD code
13374  * will ensure that, at least, the partial management
13375  * partition key is present in the table.
13376  */
13377 static void set_partition_keys(struct hfi1_pportdata *ppd)
13378 {
13379         struct hfi1_devdata *dd = ppd->dd;
13380         u64 reg = 0;
13381         int i;
13382
13383         dd_dev_info(dd, "Setting partition keys\n");
13384         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13385                 reg |= (ppd->pkeys[i] &
13386                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13387                         ((i % 4) *
13388                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13389                 /* Each register holds 4 PKey values. */
13390                 if ((i % 4) == 3) {
13391                         write_csr(dd, RCV_PARTITION_KEY +
13392                                   ((i - 3) * 2), reg);
13393                         reg = 0;
13394                 }
13395         }
13396
13397         /* Always enable HW pkeys check when pkeys table is set */
13398         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13399 }
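/*
 * Packing sketch for the loop above (illustrative only): four 16-bit
 * pkeys share one 64-bit RCV_PARTITION_KEY CSR, each shifted by
 * (i % 4) * RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT.  The register is
 * flushed on every fourth key, so pkeys[0..3] land at byte offset
 * (3 - 3) * 2 = 0 and pkeys[4..7] at (7 - 3) * 2 = 8.
 */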
13400
13401 /*
13402  * These CSRs and memories are uninitialized on reset and must be
13403  * written before reading to set the ECC/parity bits.
13404  *
13405  * NOTE: All user context CSRs that are not mmapped write-only
13406  * (e.g. the TID flows) must be initialized even if the driver never
13407  * reads them.
13408  */
13409 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13410 {
13411         int i, j;
13412
13413         /* CceIntMap */
13414         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13415                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13416
13417         /* SendCtxtCreditReturnAddr */
13418         for (i = 0; i < chip_send_contexts(dd); i++)
13419                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13420
13421         /* PIO Send buffers */
13422         /* SDMA Send buffers */
13423         /*
13424          * These are not normally read, and (presently) have no method
13425          * to be read, so are not pre-initialized
13426          */
13427
13428         /* RcvHdrAddr */
13429         /* RcvHdrTailAddr */
13430         /* RcvTidFlowTable */
13431         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13432                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13433                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13434                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13435                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13436         }
13437
13438         /* RcvArray */
13439         for (i = 0; i < chip_rcv_array_count(dd); i++)
13440                 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13441
13442         /* RcvQPMapTable */
13443         for (i = 0; i < 32; i++)
13444                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13445 }
13446
13447 /*
13448  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13449  */
13450 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13451                              u64 ctrl_bits)
13452 {
13453         unsigned long timeout;
13454         u64 reg;
13455
13456         /* is the condition present? */
13457         reg = read_csr(dd, CCE_STATUS);
13458         if ((reg & status_bits) == 0)
13459                 return;
13460
13461         /* clear the condition */
13462         write_csr(dd, CCE_CTRL, ctrl_bits);
13463
13464         /* wait for the condition to clear */
13465         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13466         while (1) {
13467                 reg = read_csr(dd, CCE_STATUS);
13468                 if ((reg & status_bits) == 0)
13469                         return;
13470                 if (time_after(jiffies, timeout)) {
13471                         dd_dev_err(dd,
13472                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13473                                    status_bits, reg & status_bits);
13474                         return;
13475                 }
13476                 udelay(1);
13477         }
13478 }
13479
13480 /* set CCE CSRs to chip reset defaults */
13481 static void reset_cce_csrs(struct hfi1_devdata *dd)
13482 {
13483         int i;
13484
13485         /* CCE_REVISION read-only */
13486         /* CCE_REVISION2 read-only */
13487         /* CCE_CTRL - bits clear automatically */
13488         /* CCE_STATUS read-only, use CceCtrl to clear */
13489         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13490         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13491         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13492         for (i = 0; i < CCE_NUM_SCRATCH; i++)
13493                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13494         /* CCE_ERR_STATUS read-only */
13495         write_csr(dd, CCE_ERR_MASK, 0);
13496         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13497         /* CCE_ERR_FORCE leave alone */
13498         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13499                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13500         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13501         /* CCE_PCIE_CTRL leave alone */
13502         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13503                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13504                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13505                           CCE_MSIX_TABLE_UPPER_RESETCSR);
13506         }
13507         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13508                 /* CCE_MSIX_PBA read-only */
13509                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13510                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13511         }
13512         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13513                 write_csr(dd, CCE_INT_MAP, 0);
13514         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13515                 /* CCE_INT_STATUS read-only */
13516                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13517                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13518                 /* CCE_INT_FORCE leave alone */
13519                 /* CCE_INT_BLOCKED read-only */
13520         }
13521         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13522                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13523 }
13524
13525 /* set MISC CSRs to chip reset defaults */
13526 static void reset_misc_csrs(struct hfi1_devdata *dd)
13527 {
13528         int i;
13529
13530         for (i = 0; i < 32; i++) {
13531                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13532                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13533                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13534         }
13535         /*
13536          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13537          * only be written in 128-byte chunks
13538          */
13539         /* init RSA engine to clear lingering errors */
13540         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13541         write_csr(dd, MISC_CFG_RSA_MU, 0);
13542         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13543         /* MISC_STS_8051_DIGEST read-only */
13544         /* MISC_STS_SBM_DIGEST read-only */
13545         /* MISC_STS_PCIE_DIGEST read-only */
13546         /* MISC_STS_FAB_DIGEST read-only */
13547         /* MISC_ERR_STATUS read-only */
13548         write_csr(dd, MISC_ERR_MASK, 0);
13549         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13550         /* MISC_ERR_FORCE leave alone */
13551 }
13552
13553 /* set TXE CSRs to chip reset defaults */
13554 static void reset_txe_csrs(struct hfi1_devdata *dd)
13555 {
13556         int i;
13557
13558         /*
13559          * TXE Kernel CSRs
13560          */
13561         write_csr(dd, SEND_CTRL, 0);
13562         __cm_reset(dd, 0);      /* reset CM internal state */
13563         /* SEND_CONTEXTS read-only */
13564         /* SEND_DMA_ENGINES read-only */
13565         /* SEND_PIO_MEM_SIZE read-only */
13566         /* SEND_DMA_MEM_SIZE read-only */
13567         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13568         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13569         /* SEND_PIO_ERR_STATUS read-only */
13570         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13571         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13572         /* SEND_PIO_ERR_FORCE leave alone */
13573         /* SEND_DMA_ERR_STATUS read-only */
13574         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13575         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13576         /* SEND_DMA_ERR_FORCE leave alone */
13577         /* SEND_EGRESS_ERR_STATUS read-only */
13578         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13579         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13580         /* SEND_EGRESS_ERR_FORCE leave alone */
13581         write_csr(dd, SEND_BTH_QP, 0);
13582         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13583         write_csr(dd, SEND_SC2VLT0, 0);
13584         write_csr(dd, SEND_SC2VLT1, 0);
13585         write_csr(dd, SEND_SC2VLT2, 0);
13586         write_csr(dd, SEND_SC2VLT3, 0);
13587         write_csr(dd, SEND_LEN_CHECK0, 0);
13588         write_csr(dd, SEND_LEN_CHECK1, 0);
13589         /* SEND_ERR_STATUS read-only */
13590         write_csr(dd, SEND_ERR_MASK, 0);
13591         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13592         /* SEND_ERR_FORCE read-only */
13593         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13594                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13595         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13596                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13597         for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13598                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13599         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13600                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13601         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13602                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13603         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13604         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13605         /* SEND_CM_CREDIT_USED_STATUS read-only */
13606         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13607         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13608         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13609         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13610         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13611         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13612                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13613         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13614         /* SEND_CM_CREDIT_USED_VL read-only */
13615         /* SEND_CM_CREDIT_USED_VL15 read-only */
13616         /* SEND_EGRESS_CTXT_STATUS read-only */
13617         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13618         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13619         /* SEND_EGRESS_ERR_INFO read-only */
13620         /* SEND_EGRESS_ERR_SOURCE read-only */
13621
13622         /*
13623          * TXE Per-Context CSRs
13624          */
13625         for (i = 0; i < chip_send_contexts(dd); i++) {
13626                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13627                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13628                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13629                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13630                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13631                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13632                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13633                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13634                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13635                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13636                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13637                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13638         }
13639
13640         /*
13641          * TXE Per-SDMA CSRs
13642          */
13643         for (i = 0; i < chip_sdma_engines(dd); i++) {
13644                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13645                 /* SEND_DMA_STATUS read-only */
13646                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13647                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13648                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13649                 /* SEND_DMA_HEAD read-only */
13650                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13651                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13652                 /* SEND_DMA_IDLE_CNT read-only */
13653                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13654                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13655                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13656                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13657                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13658                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13659                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13660                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13661                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13662                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13663                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13664                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13665                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13666                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13667         }
13668 }
13669
13670 /*
13671  * Expect on entry:
13672  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13673  */
13674 static void init_rbufs(struct hfi1_devdata *dd)
13675 {
13676         u64 reg;
13677         int count;
13678
13679         /*
13680          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13681          * clear.
13682          */
13683         count = 0;
13684         while (1) {
13685                 reg = read_csr(dd, RCV_STATUS);
13686                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13687                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13688                         break;
13689                 /*
13690                  * Give up after 1ms - maximum wait time.
13691                  *
13692                  * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13693                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13694                  *      136 KB / (66% * 250MB/s) = 844us
13695                  */
13696                 if (count++ > 500) {
13697                         dd_dev_err(dd,
13698                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13699                                    __func__, reg);
13700                         break;
13701                 }
13702                 udelay(2); /* do not busy-wait the CSR */
13703         }
13704
13705         /* start the init - expect RcvCtrl to be 0 */
13706         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13707
13708         /*
13709          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13710          * period after the write before RcvStatus.RxRbufInitDone is valid.
13711          * The delay in the first run through the loop below is sufficient and
13712          * required before the first read of RcvStatus.RxRbufInitDone.
13713          */
13714         read_csr(dd, RCV_CTRL);
13715
13716         /* wait for the init to finish */
13717         count = 0;
13718         while (1) {
13719                 /* delay is required first time through - see above */
13720                 udelay(2); /* do not busy-wait the CSR */
13721                 reg = read_csr(dd, RCV_STATUS);
13722                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13723                         break;
13724
13725                 /* give up after 100us - slowest possible at 33MHz is 73us */
13726                 if (count++ > 50) {
13727                         dd_dev_err(dd,
13728                                    "%s: RcvStatus.RxRbufInit not set, continuing\n",
13729                                    __func__);
13730                         break;
13731                 }
13732         }
13733 }
13734
13735 /* set RXE CSRs to chip reset defaults */
13736 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13737 {
13738         int i, j;
13739
13740         /*
13741          * RXE Kernel CSRs
13742          */
13743         write_csr(dd, RCV_CTRL, 0);
13744         init_rbufs(dd);
13745         /* RCV_STATUS read-only */
13746         /* RCV_CONTEXTS read-only */
13747         /* RCV_ARRAY_CNT read-only */
13748         /* RCV_BUF_SIZE read-only */
13749         write_csr(dd, RCV_BTH_QP, 0);
13750         write_csr(dd, RCV_MULTICAST, 0);
13751         write_csr(dd, RCV_BYPASS, 0);
13752         write_csr(dd, RCV_VL15, 0);
13753         /* this is a clear-down */
13754         write_csr(dd, RCV_ERR_INFO,
13755                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13756         /* RCV_ERR_STATUS read-only */
13757         write_csr(dd, RCV_ERR_MASK, 0);
13758         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13759         /* RCV_ERR_FORCE leave alone */
13760         for (i = 0; i < 32; i++)
13761                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13762         for (i = 0; i < 4; i++)
13763                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13764         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13765                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13766         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13767                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13768         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13769                 clear_rsm_rule(dd, i);
13770         for (i = 0; i < 32; i++)
13771                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13772
13773         /*
13774          * RXE Kernel and User Per-Context CSRs
13775          */
13776         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13777                 /* kernel */
13778                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13779                 /* RCV_CTXT_STATUS read-only */
13780                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13781                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13782                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13783                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13784                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13785                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13786                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13787                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13788                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13789                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13790
13791                 /* user */
13792                 /* RCV_HDR_TAIL read-only */
13793                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13794                 /* RCV_EGR_INDEX_TAIL read-only */
13795                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13796                 /* RCV_EGR_OFFSET_TAIL read-only */
13797                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13798                         write_uctxt_csr(dd, i,
13799                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
13800                 }
13801         }
13802 }
13803
13804 /*
13805  * Set sc2vl tables.
13806  *
13807  * They power on to zeros, so to avoid send context errors
13808  * they need to be set:
13809  *
13810  * SC 0-7 -> VL 0-7 (respectively)
13811  * SC 15  -> VL 15
13812  * otherwise
13813  *        -> VL 0
13814  */
13815 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13816 {
13817         int i;
13818         /* init per architecture spec, constrained by hardware capability */
13819
13820         /* HFI maps sent packets */
13821         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13822                 0,
13823                 0, 0, 1, 1,
13824                 2, 2, 3, 3,
13825                 4, 4, 5, 5,
13826                 6, 6, 7, 7));
13827         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13828                 1,
13829                 8, 0, 9, 0,
13830                 10, 0, 11, 0,
13831                 12, 0, 13, 0,
13832                 14, 0, 15, 15));
13833         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13834                 2,
13835                 16, 0, 17, 0,
13836                 18, 0, 19, 0,
13837                 20, 0, 21, 0,
13838                 22, 0, 23, 0));
13839         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13840                 3,
13841                 24, 0, 25, 0,
13842                 26, 0, 27, 0,
13843                 28, 0, 29, 0,
13844                 30, 0, 31, 0));
13845
13846         /* DC maps received packets */
13847         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13848                 15_0,
13849                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13850                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13851         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13852                 31_16,
13853                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13854                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13855
13856         /* initialize the cached sc2vl values consistently with h/w */
13857         for (i = 0; i < 32; i++) {
13858                 if (i < 8 || i == 15)
13859                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13860                 else
13861                         *((u8 *)(dd->sc2vl) + i) = 0;
13862         }
13863 }
13864
13865 /*
13866  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13867  * depend on the chip going through a power-on reset - a driver may be loaded
13868  * and unloaded many times.
13869  *
13870  * Do not write any CSR values to the chip in this routine - there may be
13871  * a reset following the (possible) FLR in this routine.
13872  *
13873  */
13874 static int init_chip(struct hfi1_devdata *dd)
13875 {
13876         int i;
13877         int ret = 0;
13878
13879         /*
13880          * Put the HFI CSRs in a known state.
13881          * Combine this with a DC reset.
13882          *
13883          * Stop the device from doing anything while we do a
13884          * reset.  We know there are no other active users of
13885          * the device since we are now in charge.  Turn off
13886          * all outbound and inbound traffic and make sure
13887          * the device does not generate any interrupts.
13888          */
13889
13890         /* disable send contexts and SDMA engines */
13891         write_csr(dd, SEND_CTRL, 0);
13892         for (i = 0; i < chip_send_contexts(dd); i++)
13893                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13894         for (i = 0; i < chip_sdma_engines(dd); i++)
13895                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13896         /* disable port (turn off RXE inbound traffic) and contexts */
13897         write_csr(dd, RCV_CTRL, 0);
13898         for (i = 0; i < chip_rcv_contexts(dd); i++)
13899                 write_csr(dd, RCV_CTXT_CTRL, 0);
13900         /* mask all interrupt sources */
13901         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13902                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13903
13904         /*
13905          * DC Reset: do a full DC reset before the register clear.
13906          * A recommended length of time to hold is one CSR read,
13907          * so reread the CceDcCtrl.  Then, hold the DC in reset
13908          * across the clear.
13909          */
13910         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13911         (void)read_csr(dd, CCE_DC_CTRL);
13912
13913         if (use_flr) {
13914                 /*
13915                  * A FLR will reset the SPC core and part of the PCIe.
13916                  * The parts that need to be restored have already been
13917                  * saved.
13918                  */
13919                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13920
13921                 /* do the FLR, the DC reset will remain */
13922                 pcie_flr(dd->pcidev);
13923
13924                 /* restore command and BARs */
13925                 ret = restore_pci_variables(dd);
13926                 if (ret) {
13927                         dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13928                                    __func__);
13929                         return ret;
13930                 }
13931
13932                 if (is_ax(dd)) {
13933                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
13934                         pcie_flr(dd->pcidev);
13935                         ret = restore_pci_variables(dd);
13936                         if (ret) {
13937                                 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13938                                            __func__);
13939                                 return ret;
13940                         }
13941                 }
13942         } else {
13943                 dd_dev_info(dd, "Resetting CSRs with writes\n");
13944                 reset_cce_csrs(dd);
13945                 reset_txe_csrs(dd);
13946                 reset_rxe_csrs(dd);
13947                 reset_misc_csrs(dd);
13948         }
13949         /* clear the DC reset */
13950         write_csr(dd, CCE_DC_CTRL, 0);
13951
13952         /* Set the LED off */
13953         setextled(dd, 0);
13954
13955         /*
13956          * Clear the QSFP reset.
13957          * An FLR enforces a 0 on all out pins. The driver does not touch
13958          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
13959          * anything plugged in constantly held in reset, if it pays attention
13960          * to RESET_N.
13961          * Prime examples of this are optical cables. Set all pins high.
13962          * I2CCLK and I2CDAT will change per direction, and INT_N and
13963          * MODPRS_N are input only and their value is ignored.
13964          */
13965         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13966         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13967         init_chip_resources(dd);
13968         return ret;
13969 }
13970
13971 static void init_early_variables(struct hfi1_devdata *dd)
13972 {
13973         int i;
13974
13975         /* assign link credit variables */
13976         dd->vau = CM_VAU;
13977         dd->link_credits = CM_GLOBAL_CREDITS;
13978         if (is_ax(dd))
13979                 dd->link_credits--;
13980         dd->vcu = cu_to_vcu(hfi1_cu);
13981         /* enough room for 8 MAD packets plus header - 17K */
13982         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13983         if (dd->vl15_init > dd->link_credits)
13984                 dd->vl15_init = dd->link_credits;
13985
13986         write_uninitialized_csrs_and_memories(dd);
13987
13988         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13989                 for (i = 0; i < dd->num_pports; i++) {
13990                         struct hfi1_pportdata *ppd = &dd->pport[i];
13991
13992                         set_partition_keys(ppd);
13993                 }
13994         init_sc2vl_tables(dd);
13995 }
13996
13997 static void init_kdeth_qp(struct hfi1_devdata *dd)
13998 {
13999         /* user changed the KDETH_QP */
14000         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14001                 /* out of range or illegal value */
14002                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14003                 kdeth_qp = 0;
14004         }
14005         if (kdeth_qp == 0)      /* not set, or failed range check */
14006                 kdeth_qp = DEFAULT_KDETH_QP;
14007
14008         write_csr(dd, SEND_BTH_QP,
14009                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14010                   SEND_BTH_QP_KDETH_QP_SHIFT);
14011
14012         write_csr(dd, RCV_BTH_QP,
14013                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14014                   RCV_BTH_QP_KDETH_QP_SHIFT);
14015 }
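/*
 * Parameter handling recap (illustrative only): prefixes 1 through 0xfe
 * are used as given; 0 (unset) and anything >= 0xff fall back to
 * DEFAULT_KDETH_QP, and the same masked/shifted prefix is then
 * programmed on both the send (SEND_BTH_QP) and receive (RCV_BTH_QP)
 * sides so the two directions agree on the KDETH QP range.
 */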
14016
14017 /**
14018  * init_qpmap_table
14019  * @dd - device data
14020  * @first_ctxt - first context
14021  * @last_ctxt - last context
14022  *
14023  * This routine sets the qpn mapping table that
14024  * is indexed by qpn[8:1].
14025  *
14026  * The routine will round robin the 256 settings
14027  * from first_ctxt to last_ctxt.
14028  *
14029  * The first/last looks ahead to having specialized
14030  * receive contexts for mgmt and bypass.  Normal
14031  * verbs traffic is assumed to be on a range
14032  * of receive contexts.
14033  */
14034 static void init_qpmap_table(struct hfi1_devdata *dd,
14035                              u32 first_ctxt,
14036                              u32 last_ctxt)
14037 {
14038         u64 reg = 0;
14039         u64 regno = RCV_QP_MAP_TABLE;
14040         int i;
14041         u64 ctxt = first_ctxt;
14042
14043         for (i = 0; i < 256; i++) {
14044                 reg |= ctxt << (8 * (i % 8));
14045                 ctxt++;
14046                 if (ctxt > last_ctxt)
14047                         ctxt = first_ctxt;
14048                 if (i % 8 == 7) {
14049                         write_csr(dd, regno, reg);
14050                         reg = 0;
14051                         regno += 8;
14052                 }
14053         }
14054
14055         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14056                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14057 }
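/*
 * Packing example for the loop above (illustrative only): the table has
 * 256 one-byte entries, eight per 64-bit RCV_QP_MAP_TABLE CSR, so a full
 * register is written every time i % 8 == 7, 32 CSR writes in all.  With
 * first_ctxt = 1 and last_ctxt = 3, for instance, the entries read
 * 1, 2, 3, 1, 2, 3, ... around the whole table.
 */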
14058
14059 struct rsm_map_table {
14060         u64 map[NUM_MAP_REGS];
14061         unsigned int used;
14062 };
14063
14064 struct rsm_rule_data {
14065         u8 offset;
14066         u8 pkt_type;
14067         u32 field1_off;
14068         u32 field2_off;
14069         u32 index1_off;
14070         u32 index1_width;
14071         u32 index2_off;
14072         u32 index2_width;
14073         u32 mask1;
14074         u32 value1;
14075         u32 mask2;
14076         u32 value2;
14077 };
14078
14079 /*
14080  * Return an initialized RMT map table for users to fill in.  OK if it
14081  * returns NULL, indicating no table.
14082  */
14083 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14084 {
14085         struct rsm_map_table *rmt;
14086         u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14087
14088         rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14089         if (rmt) {
14090                 memset(rmt->map, rxcontext, sizeof(rmt->map));
14091                 rmt->used = 0;
14092         }
14093
14094         return rmt;
14095 }
14096
14097 /*
14098  * Write the final RMT map table to the chip and free the table.  OK if
14099  * table is NULL.
14100  */
14101 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14102                                    struct rsm_map_table *rmt)
14103 {
14104         int i;
14105
14106         if (rmt) {
14107                 /* write table to chip */
14108                 for (i = 0; i < NUM_MAP_REGS; i++)
14109                         write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14110
14111                 /* enable RSM */
14112                 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14113         }
14114 }
14115
14116 /*
14117  * Add a receive side mapping rule.
14118  */
14119 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14120                          struct rsm_rule_data *rrd)
14121 {
14122         write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14123                   (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14124                   1ull << rule_index | /* enable bit */
14125                   (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14126         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14127                   (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14128                   (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14129                   (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14130                   (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14131                   (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14132                   (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14133         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14134                   (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14135                   (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14136                   (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14137                   (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14138 }
14139
14140 /*
14141  * Clear a receive side mapping rule.
14142  */
14143 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14144 {
14145         write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14146         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14147         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14148 }
14149
14150 /* return the number of RSM map table entries that will be used for QOS */
14151 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14152                            unsigned int *np)
14153 {
14154         int i;
14155         unsigned int m, n;
14156         u8 max_by_vl = 0;
14157
14158         /* is QOS active at all? */
14159         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14160             num_vls == 1 ||
14161             krcvqsset <= 1)
14162                 goto no_qos;
14163
14164         /* determine bits for qpn */
14165         for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14166                 if (krcvqs[i] > max_by_vl)
14167                         max_by_vl = krcvqs[i];
14168         if (max_by_vl > 32)
14169                 goto no_qos;
14170         m = ilog2(__roundup_pow_of_two(max_by_vl));
14171
14172         /* determine bits for vl */
14173         n = ilog2(__roundup_pow_of_two(num_vls));
14174
14175         /* reject if too much is used */
14176         if ((m + n) > 7)
14177                 goto no_qos;
14178
14179         if (mp)
14180                 *mp = m;
14181         if (np)
14182                 *np = n;
14183
14184         return 1 << (m + n);
14185
14186 no_qos:
14187         if (mp)
14188                 *mp = 0;
14189         if (np)
14190                 *np = 0;
14191         return 0;
14192 }
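/*
 * Worked example (hypothetical values, assuming QOS is otherwise
 * enabled): with two VLs configured and krcvqs[] = {4, 4}, max_by_vl = 4
 * gives m = ilog2(4) = 2 qpn bits and n = ilog2(2) = 1 vl bit;
 * m + n = 3 <= 7, so the routine reports 1 << 3 = 8 RMT entries for QOS.
 */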
14193
14194 /**
14195  * init_qos - init RX qos
14196  * @dd - device data
14197  * @rmt - RSM map table
14198  *
14199  * This routine initializes Rule 0 and the RSM map table to implement
14200  * quality of service (qos).
14201  *
14202  * If all of the limit tests succeed, qos is applied based on the array
14203  * interpretation of krcvqs where entry 0 is VL0.
14204  *
14205  * The number of vl bits (n) and the number of qpn bits (m) are computed to
14206  * feed both the RSM map table and the single rule.
14207  */
14208 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14209 {
14210         struct rsm_rule_data rrd;
14211         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14212         unsigned int rmt_entries;
14213         u64 reg;
14214
14215         if (!rmt)
14216                 goto bail;
14217         rmt_entries = qos_rmt_entries(dd, &m, &n);
14218         if (rmt_entries == 0)
14219                 goto bail;
14220         qpns_per_vl = 1 << m;
14221
14222         /* enough room in the map table? */
14223         rmt_entries = 1 << (m + n);
14224         if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14225                 goto bail;
14226
14227         /* add qos entries to the RSM map table */
14228         for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14229                 unsigned tctxt;
14230
14231                 for (qpn = 0, tctxt = ctxt;
14232                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14233                         unsigned idx, regoff, regidx;
14234
14235                         /* generate the index the hardware will produce */
14236                         idx = rmt->used + ((qpn << n) ^ i);
14237                         regoff = (idx % 8) * 8;
14238                         regidx = idx / 8;
14239                         /* replace default with context number */
14240                         reg = rmt->map[regidx];
14241                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14242                                 << regoff);
14243                         reg |= (u64)(tctxt++) << regoff;
14244                         rmt->map[regidx] = reg;
14245                         if (tctxt == ctxt + krcvqs[i])
14246                                 tctxt = ctxt;
14247                 }
14248                 ctxt += krcvqs[i];
14249         }
14250
14251         rrd.offset = rmt->used;
14252         rrd.pkt_type = 2;
14253         rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14254         rrd.field2_off = LRH_SC_MATCH_OFFSET;
14255         rrd.index1_off = LRH_SC_SELECT_OFFSET;
14256         rrd.index1_width = n;
14257         rrd.index2_off = QPN_SELECT_OFFSET;
14258         rrd.index2_width = m + n;
14259         rrd.mask1 = LRH_BTH_MASK;
14260         rrd.value1 = LRH_BTH_VALUE;
14261         rrd.mask2 = LRH_SC_MASK;
14262         rrd.value2 = LRH_SC_VALUE;
14263
14264         /* add rule 0 */
14265         add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14266
14267         /* mark RSM map entries as used */
14268         rmt->used += rmt_entries;
14269         /* map everything else to the mcast/err/vl15 context */
14270         init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14271         dd->qos_shift = n + 1;
14272         return;
14273 bail:
14274         dd->qos_shift = 1;
14275         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14276 }
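/*
 * Index-spreading sketch for the map fill above (hypothetical values):
 * continuing the m = 2, n = 1 example, idx = rmt->used + ((qpn << n) ^ i)
 * places VL0 qpns 0-3 at entries used + 0, 2, 4, 6 and VL1 qpns 0-3 at
 * entries used + 1, 3, 5, 7, so consecutive map entries alternate
 * between the two VLs' kernel receive contexts.
 */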
14277
14278 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14279                                     struct rsm_map_table *rmt)
14280 {
14281         struct rsm_rule_data rrd;
14282         u64 reg;
14283         int i, idx, regoff, regidx;
14284         u8 offset;
14285
14286         /* there needs to be enough room in the map table */
14287         if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14288                 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14289                 return;
14290         }
14291
14292         /*
14293          * RSM will extract the destination context as an index into the
14294          * map table.  The destination contexts are a sequential block
14295          * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14296          * Map entries are accessed as offset + extracted value.  Adjust
14297          * the added offset so this sequence can be placed anywhere in
14298          * the table - as long as the entries themselves do not wrap.
14299          * There are only enough bits in offset for the table size, so
14300          * start with that to allow for a "negative" offset.
14301          */
14302         offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14303                                                 (int)dd->first_dyn_alloc_ctxt);
14304
14305         for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14306                                 i < dd->num_rcv_contexts; i++, idx++) {
14307                 /* replace with identity mapping */
14308                 regoff = (idx % 8) * 8;
14309                 regidx = idx / 8;
14310                 reg = rmt->map[regidx];
14311                 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14312                 reg |= (u64)i << regoff;
14313                 rmt->map[regidx] = reg;
14314         }
14315
14316         /*
14317          * For RSM intercept of Expected FECN packets:
14318          * o packet type 0 - expected
14319          * o match on F (bit 95), using select/match 1, and
14320          * o match on SH (bit 133), using select/match 2.
14321          *
14322          * Use index 1 to extract the 8-bit receive context from DestQP
14323          * (start at bit 64).  Use that as the RSM map table index.
14324          */
14325         rrd.offset = offset;
14326         rrd.pkt_type = 0;
14327         rrd.field1_off = 95;
14328         rrd.field2_off = 133;
14329         rrd.index1_off = 64;
14330         rrd.index1_width = 8;
14331         rrd.index2_off = 0;
14332         rrd.index2_width = 0;
14333         rrd.mask1 = 1;
14334         rrd.value1 = 1;
14335         rrd.mask2 = 1;
14336         rrd.value2 = 1;
14337
14338         /* add rule 1 */
14339         add_rsm_rule(dd, RSM_INS_FECN, &rrd);
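        /*
         * Sketch of the intended flow, as read from the code above (not a
         * normative statement of hardware behavior): an expected packet
         * with both F and SH set has its 8-bit DestQP field extracted;
         * that value plus the offset lands on one of the identity entries
         * written in the loop above, so the packet is delivered to the
         * receive context named in DestQP.
         */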
14340
14341         rmt->used += dd->num_user_contexts;
14342 }
14343
14344 /* Initialize RSM for VNIC */
14345 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14346 {
14347         u8 i, j;
14348         u8 ctx_id = 0;
14349         u64 reg;
14350         u32 regoff;
14351         struct rsm_rule_data rrd;
14352
14353         if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14354                 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14355                            dd->vnic.rmt_start);
14356                 return;
14357         }
14358
14359         dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14360                 dd->vnic.rmt_start,
14361                 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14362
14363         /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14364         regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14365         reg = read_csr(dd, regoff);
14366         for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14367                 /* Update map register with vnic context */
14368                 j = (dd->vnic.rmt_start + i) % 8;
14369                 reg &= ~(0xffllu << (j * 8));
14370                 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14371                 /* Wrap up vnic ctx index */
14372                 ctx_id %= dd->vnic.num_ctxt;
14373                 /* Write back map register */
14374                 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14375                         dev_dbg(&(dd)->pcidev->dev,
14376                                 "Vnic rsm map reg[%d] =0x%llx\n",
14377                                 regoff - RCV_RSM_MAP_TABLE, reg);
14378
14379                         write_csr(dd, regoff, reg);
14380                         regoff += 8;
14381                         if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14382                                 reg = read_csr(dd, regoff);
14383                 }
14384         }
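        /*
         * Packing recap for the loop above: each 64-bit map register holds
         * eight one-byte context entries, so table entry k lives in
         * register k / 8 at byte k % 8.  The vnic block starts at
         * dd->vnic.rmt_start, cycles through the allocated vnic contexts,
         * and a register is written back when its last byte (j == 7) or
         * the final entry is filled.
         */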
14385
14386         /* Add rule for vnic */
14387         rrd.offset = dd->vnic.rmt_start;
14388         rrd.pkt_type = 4;
14389         /* Match 16B packets */
14390         rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14391         rrd.mask1 = L2_TYPE_MASK;
14392         rrd.value1 = L2_16B_VALUE;
14393         /* Match ETH L4 packets */
14394         rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14395         rrd.mask2 = L4_16B_TYPE_MASK;
14396         rrd.value2 = L4_16B_ETH_VALUE;
14397         /* Calc context from veswid and entropy */
14398         rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14399         rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14400         rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14401         rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
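        /*
         * ilog2(NUM_VNIC_MAP_ENTRIES) is the number of bits needed to index
         * the vnic block of map entries; as an example only, a block of 8
         * entries would make each select field above 3 bits wide (the entry
         * count here is assumed purely for illustration).
         */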
14402         add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14403
14404         /* Enable RSM if not already enabled */
14405         add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14406 }
14407
14408 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14409 {
14410         clear_rsm_rule(dd, RSM_INS_VNIC);
14411
14412         /* Disable RSM if used only by vnic */
14413         if (dd->vnic.rmt_start == 0)
14414                 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14415 }
14416
14417 static void init_rxe(struct hfi1_devdata *dd)
14418 {
14419         struct rsm_map_table *rmt;
14420         u64 val;
14421
14422         /* enable all receive errors */
14423         write_csr(dd, RCV_ERR_MASK, ~0ull);
14424
14425         rmt = alloc_rsm_map_table(dd);
14426         /* set up QOS, including the QPN map table */
14427         init_qos(dd, rmt);
14428         init_user_fecn_handling(dd, rmt);
14429         complete_rsm_map_table(dd, rmt);
14430         /* record number of used rsm map entries for vnic */
14431         dd->vnic.rmt_start = rmt->used;
14432         kfree(rmt);
14433
14434         /*
14435          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14436          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14437          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14438          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14439          * Max_PayLoad_Size set to its minimum of 128.
14440          * Max_Payload_Size set to its minimum of 128.
14441          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14442          * (64 bytes).  Max_Payload_Size is possibly modified upward in
14443          * tune_pcie_caps() which is called after this routine.
14444          */
14445
14446         /* Have 16 bytes (4DW) of bypass header available in header queue */
14447         val = read_csr(dd, RCV_BYPASS);
14448         val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14449         val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14450                 RCV_BYPASS_HDR_SIZE_SHIFT);
14451         write_csr(dd, RCV_BYPASS, val);
14452 }
14453
14454 static void init_other(struct hfi1_devdata *dd)
14455 {
14456         /* enable all CCE errors */
14457         write_csr(dd, CCE_ERR_MASK, ~0ull);
14458         /* enable *some* Misc errors */
14459         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14460         /* enable all DC errors, except LCB */
14461         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14462         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14463 }
14464
14465 /*
14466  * Fill out the given AU table using the given CU.  A CU is defined in terms
14467  * of AUs.  The table is an encoding: given the index, how many AUs does that
14468  * index represent?
14469  *
14470  * NOTE: Assumes that the register layout is the same for the
14471  * local and remote tables.
14472  */
14473 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14474                                u32 csr0to3, u32 csr4to7)
14475 {
14476         write_csr(dd, csr0to3,
14477                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14478                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14479                   2ull * cu <<
14480                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14481                   4ull * cu <<
14482                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14483         write_csr(dd, csr4to7,
14484                   8ull * cu <<
14485                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14486                   16ull * cu <<
14487                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14488                   32ull * cu <<
14489                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14490                   64ull * cu <<
14491                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14492 }
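/*
 * For illustration, the two writes above encode the following table, where
 * cu is the CU-to-AU multiplier passed in:
 *
 *   index:  0   1    2      3      4      5       6       7
 *   AUs:    0   1    2*cu   4*cu   8*cu   16*cu   32*cu   64*cu
 */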
14493
14494 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14495 {
14496         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14497                            SEND_CM_LOCAL_AU_TABLE4_TO7);
14498 }
14499
14500 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14501 {
14502         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14503                            SEND_CM_REMOTE_AU_TABLE4_TO7);
14504 }
14505
14506 static void init_txe(struct hfi1_devdata *dd)
14507 {
14508         int i;
14509
14510         /* enable all PIO, SDMA, general, and Egress errors */
14511         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14512         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14513         write_csr(dd, SEND_ERR_MASK, ~0ull);
14514         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14515
14516         /* enable all per-context and per-SDMA engine errors */
14517         for (i = 0; i < chip_send_contexts(dd); i++)
14518                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14519         for (i = 0; i < chip_sdma_engines(dd); i++)
14520                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14521
14522         /* set the local CU to AU mapping */
14523         assign_local_cm_au_table(dd, dd->vcu);
14524
14525         /*
14526          * Set reasonable default for Credit Return Timer
14527          * Don't set on Simulator - causes it to choke.
14528          */
14529         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14530                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14531 }
14532
14533 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14534                        u16 jkey)
14535 {
14536         u8 hw_ctxt;
14537         u64 reg;
14538
14539         if (!rcd || !rcd->sc)
14540                 return -EINVAL;
14541
14542         hw_ctxt = rcd->sc->hw_context;
14543         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14544                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14545                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14546         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14547         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14548                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14549         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14550         /*
14551          * Enable send-side J_KEY integrity check, unless this is A0 h/w
14552          */
14553         if (!is_ax(dd)) {
14554                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14555                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14556                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14557         }
14558
14559         /* Enable J_KEY check on receive context. */
14560         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14561                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14562                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14563         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14564
14565         return 0;
14566 }
14567
14568 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14569 {
14570         u8 hw_ctxt;
14571         u64 reg;
14572
14573         if (!rcd || !rcd->sc)
14574                 return -EINVAL;
14575
14576         hw_ctxt = rcd->sc->hw_context;
14577         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14578         /*
14579          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14580          * This check would not have been enabled for A0 h/w, see
14581          * set_ctxt_jkey().
14582          */
14583         if (!is_ax(dd)) {
14584                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14585                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14586                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14587         }
14588         /* Turn off the J_KEY on the receive side */
14589         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14590
14591         return 0;
14592 }
14593
14594 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14595                        u16 pkey)
14596 {
14597         u8 hw_ctxt;
14598         u64 reg;
14599
14600         if (!rcd || !rcd->sc)
14601                 return -EINVAL;
14602
14603         hw_ctxt = rcd->sc->hw_context;
14604         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14605                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14606         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14607         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14608         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14609         reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14610         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14611
14612         return 0;
14613 }
14614
14615 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14616 {
14617         u8 hw_ctxt;
14618         u64 reg;
14619
14620         if (!ctxt || !ctxt->sc)
14621                 return -EINVAL;
14622
14623         hw_ctxt = ctxt->sc->hw_context;
14624         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14625         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14626         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14627         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14628
14629         return 0;
14630 }
14631
14632 /*
14633  * Start the clean up of the chip. Our clean up happens in multiple
14634  * stages and this is just the first.
14635  */
14636 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14637 {
14638         aspm_exit(dd);
14639         free_cntrs(dd);
14640         free_rcverr(dd);
14641         finish_chip_resources(dd);
14642 }
14643
14644 #define HFI_BASE_GUID(dev) \
14645         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
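/*
 * As the peer matching below suggests, the two HFIs of one ASIC are expected
 * to differ only in the GUID bit at GUID_HFI_INDEX_SHIFT; clearing that bit
 * yields a common "base" value, so equal base GUIDs with different unit
 * numbers identify the peer device.
 */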
14646
14647 /*
14648  * Information can be shared between the two HFIs on the same ASIC
14649  * in the same OS.  This function finds the peer device and sets
14650  * up a shared structure.
14651  */
14652 static int init_asic_data(struct hfi1_devdata *dd)
14653 {
14654         unsigned long flags;
14655         struct hfi1_devdata *tmp, *peer = NULL;
14656         struct hfi1_asic_data *asic_data;
14657         int ret = 0;
14658
14659         /* pre-allocate the asic structure in case we are the first device */
14660         asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14661         if (!asic_data)
14662                 return -ENOMEM;
14663
14664         spin_lock_irqsave(&hfi1_devs_lock, flags);
14665         /* Find our peer device */
14666         list_for_each_entry(tmp, &hfi1_dev_list, list) {
14667                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14668                     dd->unit != tmp->unit) {
14669                         peer = tmp;
14670                         break;
14671                 }
14672         }
14673
14674         if (peer) {
14675                 /* use already allocated structure */
14676                 dd->asic_data = peer->asic_data;
14677                 kfree(asic_data);
14678         } else {
14679                 dd->asic_data = asic_data;
14680                 mutex_init(&dd->asic_data->asic_resource_mutex);
14681         }
14682         dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14683         spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14684
14685         /* first one through - set up i2c devices */
14686         if (!peer)
14687                 ret = set_up_i2c(dd, dd->asic_data);
14688
14689         return ret;
14690 }
14691
14692 /*
14693  * Set dd->boardname.  Use a generic name if a name is not returned from
14694  * EFI variable space.
14695  *
14696  * Return 0 on success, -ENOMEM if space could not be allocated.
14697  */
14698 static int obtain_boardname(struct hfi1_devdata *dd)
14699 {
14700         /* generic board description */
14701         const char generic[] =
14702                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14703         unsigned long size;
14704         int ret;
14705
14706         ret = read_hfi1_efi_var(dd, "description", &size,
14707                                 (void **)&dd->boardname);
14708         if (ret) {
14709                 dd_dev_info(dd, "Board description not found\n");
14710                 /* use generic description */
14711                 dd->boardname = kstrdup(generic, GFP_KERNEL);
14712                 if (!dd->boardname)
14713                         return -ENOMEM;
14714         }
14715         return 0;
14716 }
14717
14718 /*
14719  * Check the interrupt registers to make sure that they are mapped correctly.
14720  * It is intended to help the user identify any mismapping by the VMM when
14721  * the driver is running in a VM. This function should only be called before
14722  * interrupts are set up.
14723  *
14724  * Return 0 on success, -EINVAL on failure.
14725  */
14726 static int check_int_registers(struct hfi1_devdata *dd)
14727 {
14728         u64 reg;
14729         u64 all_bits = ~(u64)0;
14730         u64 mask;
14731
14732         /* Clear CceIntMask[0] to avoid raising any interrupts */
14733         mask = read_csr(dd, CCE_INT_MASK);
14734         write_csr(dd, CCE_INT_MASK, 0ull);
14735         reg = read_csr(dd, CCE_INT_MASK);
14736         if (reg)
14737                 goto err_exit;
14738
14739         /* Clear all interrupt status bits */
14740         write_csr(dd, CCE_INT_CLEAR, all_bits);
14741         reg = read_csr(dd, CCE_INT_STATUS);
14742         if (reg)
14743                 goto err_exit;
14744
14745         /* Set all interrupt status bits */
14746         write_csr(dd, CCE_INT_FORCE, all_bits);
14747         reg = read_csr(dd, CCE_INT_STATUS);
14748         if (reg != all_bits)
14749                 goto err_exit;
14750
14751         /* Restore the interrupt mask */
14752         write_csr(dd, CCE_INT_CLEAR, all_bits);
14753         write_csr(dd, CCE_INT_MASK, mask);
14754
14755         return 0;
14756 err_exit:
14757         write_csr(dd, CCE_INT_MASK, mask);
14758         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14759         return -EINVAL;
14760 }
14761
14762 /**
14763  * hfi1_init_dd() - Initialize most of the dd structure.
14764  * @dd: the hfi1_devdata structure for the device being
14765  *      initialized
14766  *
14767  * This is global, and is called directly at init to set up the
14768  * chip-specific function pointers for later use.
14769  */
14770 int hfi1_init_dd(struct hfi1_devdata *dd)
14771 {
14772         struct pci_dev *pdev = dd->pcidev;
14773         struct hfi1_pportdata *ppd;
14774         u64 reg;
14775         int i, ret;
14776         static const char * const inames[] = { /* implementation names */
14777                 "RTL silicon",
14778                 "RTL VCS simulation",
14779                 "RTL FPGA emulation",
14780                 "Functional simulator"
14781         };
14782         struct pci_dev *parent = pdev->bus->self;
14783         u32 sdma_engines = chip_sdma_engines(dd);
14784
14785         ppd = dd->pport;
14786         for (i = 0; i < dd->num_pports; i++, ppd++) {
14787                 int vl;
14788                 /* init common fields */
14789                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14790                 /* DC supports 4 link widths */
14791                 ppd->link_width_supported =
14792                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14793                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14794                 ppd->link_width_downgrade_supported =
14795                         ppd->link_width_supported;
14796                 /* start out enabling only 4X */
14797                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14798                 ppd->link_width_downgrade_enabled =
14799                                         ppd->link_width_downgrade_supported;
14800                 /* link width active is 0 when link is down */
14801                 /* link width downgrade active is 0 when link is down */
14802
14803                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14804                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
14805                         dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14806                                    num_vls, HFI1_MAX_VLS_SUPPORTED);
14807                         num_vls = HFI1_MAX_VLS_SUPPORTED;
14808                 }
14809                 ppd->vls_supported = num_vls;
14810                 ppd->vls_operational = ppd->vls_supported;
14811                 /* Set the default MTU. */
14812                 for (vl = 0; vl < num_vls; vl++)
14813                         dd->vld[vl].mtu = hfi1_max_mtu;
14814                 dd->vld[15].mtu = MAX_MAD_PACKET;
14815                 /*
14816                  * Set the initial values to reasonable defaults; they will be
14817                  * set for real when the link is up.
14818                  */
14819                 ppd->overrun_threshold = 0x4;
14820                 ppd->phy_error_threshold = 0xf;
14821                 ppd->port_crc_mode_enabled = link_crc_mask;
14822                 /* initialize supported LTP CRC mode */
14823                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14824                 /* initialize enabled LTP CRC mode */
14825                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14826                 /* start in offline */
14827                 ppd->host_link_state = HLS_DN_OFFLINE;
14828                 init_vl_arb_caches(ppd);
14829         }
14830
14831         /*
14832          * Do remaining PCIe setup and save PCIe values in dd.
14833          * Any error printing is already done by the init code.
14834          * On return, we have the chip mapped.
14835          */
14836         ret = hfi1_pcie_ddinit(dd, pdev);
14837         if (ret < 0)
14838                 goto bail_free;
14839
14840         /* Save PCI space registers to rewrite after device reset */
14841         ret = save_pci_variables(dd);
14842         if (ret < 0)
14843                 goto bail_cleanup;
14844
14845         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14846                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14847         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14848                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
14849
14850         /*
14851          * Check interrupt registers mapping if the driver has no access to
14852          * the upstream component. In this case, it is likely that the driver
14853          * is running in a VM.
14854          */
14855         if (!parent) {
14856                 ret = check_int_registers(dd);
14857                 if (ret)
14858                         goto bail_cleanup;
14859         }
14860
14861         /*
14862          * obtain the hardware ID - NOT related to unit, which is a
14863          * software enumeration
14864          */
14865         reg = read_csr(dd, CCE_REVISION2);
14866         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14867                                         & CCE_REVISION2_HFI_ID_MASK;
14868         /* the variable size will remove unwanted bits */
14869         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14870         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14871         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14872                     dd->icode < ARRAY_SIZE(inames) ?
14873                     inames[dd->icode] : "unknown", (int)dd->irev);
14874
14875         /* speeds the hardware can support */
14876         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14877         /* speeds allowed to run at */
14878         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14879         /* give a reasonable active value, will be set on link up */
14880         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14881
14882         /* fix up link widths for emulation _p */
14883         ppd = dd->pport;
14884         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14885                 ppd->link_width_supported =
14886                         ppd->link_width_enabled =
14887                         ppd->link_width_downgrade_supported =
14888                         ppd->link_width_downgrade_enabled =
14889                                 OPA_LINK_WIDTH_1X;
14890         }
14891         /* ensure num_vls isn't larger than the number of sdma engines */
14892         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
14893                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14894                            num_vls, sdma_engines);
14895                 num_vls = sdma_engines;
14896                 ppd->vls_supported = sdma_engines;
14897                 ppd->vls_operational = ppd->vls_supported;
14898         }
14899
14900         /*
14901          * Convert the ns parameter to units of 64 cclocks, as used in the CSR.
14902          * Limit the max if larger than the field holds.  If timeout is
14903          * non-zero, then the calculated field will be at least 1.
14904          *
14905          * Must be after icode is set up - the cclock rate depends
14906          * on knowing the hardware being used.
14907          */
14908         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14909         if (dd->rcv_intr_timeout_csr >
14910                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14911                 dd->rcv_intr_timeout_csr =
14912                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14913         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14914                 dd->rcv_intr_timeout_csr = 1;
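        /*
         * Worked example with assumed numbers (neither the cclock period
         * nor the timeout below is taken from real hardware): a 640 ns
         * timeout at a 2 ns cclock is 320 cclocks, giving a CSR field of
         * 320 / 64 = 5; a result of 0 with a non-zero requested timeout is
         * rounded up to 1 by the check above.
         */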
14915
14916         /* needs to be done before we look for the peer device */
14917         read_guid(dd);
14918
14919         /* set up shared ASIC data with peer device */
14920         ret = init_asic_data(dd);
14921         if (ret)
14922                 goto bail_cleanup;
14923
14924         /* obtain chip sizes, reset chip CSRs */
14925         ret = init_chip(dd);
14926         if (ret)
14927                 goto bail_cleanup;
14928
14929         /* read in the PCIe link speed information */
14930         ret = pcie_speeds(dd);
14931         if (ret)
14932                 goto bail_cleanup;
14933
14934         /* call before get_platform_config(), after init_chip_resources() */
14935         ret = eprom_init(dd);
14936         if (ret)
14937                 goto bail_free_rcverr;
14938
14939         /* Needs to be called before hfi1_firmware_init */
14940         get_platform_config(dd);
14941
14942         /* read in firmware */
14943         ret = hfi1_firmware_init(dd);
14944         if (ret)
14945                 goto bail_cleanup;
14946
14947         /*
14948          * In general, the PCIe Gen3 transition must occur after the
14949          * chip has been idled (so it won't initiate any PCIe transactions
14950          * e.g. an interrupt) and before the driver changes any registers
14951          * (the transition will reset the registers).
14952          *
14953          * In particular, place this call after:
14954          * - init_chip()     - the chip will not initiate any PCIe transactions
14955          * - pcie_speeds()   - reads the current link speed
14956          * - hfi1_firmware_init() - the needed firmware is ready to be
14957          *                          downloaded
14958          */
14959         ret = do_pcie_gen3_transition(dd);
14960         if (ret)
14961                 goto bail_cleanup;
14962
14963         /*
14964          * This should probably occur in hfi1_pcie_init(), but historically
14965          * occurs after the do_pcie_gen3_transition() code.
14966          */
14967         tune_pcie_caps(dd);
14968
14969         /* start setting dd values and adjusting CSRs */
14970         init_early_variables(dd);
14971
14972         parse_platform_config(dd);
14973
14974         ret = obtain_boardname(dd);
14975         if (ret)
14976                 goto bail_cleanup;
14977
14978         snprintf(dd->boardversion, BOARD_VERS_MAX,
14979                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14980                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14981                  (u32)dd->majrev,
14982                  (u32)dd->minrev,
14983                  (dd->revision >> CCE_REVISION_SW_SHIFT)
14984                     & CCE_REVISION_SW_MASK);
14985
14986         ret = set_up_context_variables(dd);
14987         if (ret)
14988                 goto bail_cleanup;
14989
14990         /* set initial RXE CSRs */
14991         init_rxe(dd);
14992         /* set initial TXE CSRs */
14993         init_txe(dd);
14994         /* set initial non-RXE, non-TXE CSRs */
14995         init_other(dd);
14996         /* set up KDETH QP prefix in both RX and TX CSRs */
14997         init_kdeth_qp(dd);
14998
14999         ret = hfi1_dev_affinity_init(dd);
15000         if (ret)
15001                 goto bail_cleanup;
15002
15003         /* send contexts must be set up before receive contexts */
15004         ret = init_send_contexts(dd);
15005         if (ret)
15006                 goto bail_cleanup;
15007
15008         ret = hfi1_create_kctxts(dd);
15009         if (ret)
15010                 goto bail_cleanup;
15011
15012         /*
15013          * Initialize aspm, to be done after gen3 transition and setting up
15014          * contexts and before enabling interrupts
15015          */
15016         aspm_init(dd);
15017
15018         ret = init_pervl_scs(dd);
15019         if (ret)
15020                 goto bail_cleanup;
15021
15022         /* sdma init */
15023         for (i = 0; i < dd->num_pports; ++i) {
15024                 ret = sdma_init(dd, i);
15025                 if (ret)
15026                         goto bail_cleanup;
15027         }
15028
15029         /* use contexts created by hfi1_create_kctxts */
15030         ret = set_up_interrupts(dd);
15031         if (ret)
15032                 goto bail_cleanup;
15033
15034         ret = hfi1_comp_vectors_set_up(dd);
15035         if (ret)
15036                 goto bail_clear_intr;
15037
15038         /* set up LCB access - must be after set_up_interrupts() */
15039         init_lcb_access(dd);
15040
15041         /*
15042          * Serial number is created from the base guid:
15043          * [27:24] = base guid [38:35]
15044          * [23: 0] = base guid [23: 0]
15045          */
15046         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15047                  (dd->base_guid & 0xFFFFFF) |
15048                      ((dd->base_guid >> 11) & 0xF000000));
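        /*
         * Bit bookkeeping for the expression above: shifting base_guid
         * right by 11 moves bit 35 down to bit 24 and bit 38 down to bit
         * 27, and the 0xF000000 mask keeps exactly those four bits, so the
         * serial is guid[38:35] in bits [27:24] over guid[23:0] in bits
         * [23:0], matching the layout in the comment.
         */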
15049
15050         dd->oui1 = dd->base_guid >> 56 & 0xFF;
15051         dd->oui2 = dd->base_guid >> 48 & 0xFF;
15052         dd->oui3 = dd->base_guid >> 40 & 0xFF;
15053
15054         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15055         if (ret)
15056                 goto bail_clear_intr;
15057
15058         thermal_init(dd);
15059
15060         ret = init_cntrs(dd);
15061         if (ret)
15062                 goto bail_clear_intr;
15063
15064         ret = init_rcverr(dd);
15065         if (ret)
15066                 goto bail_free_cntrs;
15067
15068         init_completion(&dd->user_comp);
15069
15070         /* The user refcount starts with one to indicate an active device */
15071         atomic_set(&dd->user_refcount, 1);
15072
15073         goto bail;
15074
15075 bail_free_rcverr:
15076         free_rcverr(dd);
15077 bail_free_cntrs:
15078         free_cntrs(dd);
15079 bail_clear_intr:
15080         hfi1_comp_vectors_clean_up(dd);
15081         msix_clean_up_interrupts(dd);
15082 bail_cleanup:
15083         hfi1_pcie_ddcleanup(dd);
15084 bail_free:
15085         hfi1_free_devdata(dd);
15086 bail:
15087         return ret;
15088 }
15089
15090 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15091                         u32 dw_len)
15092 {
15093         u32 delta_cycles;
15094         u32 current_egress_rate = ppd->current_egress_rate;
15095         /* rates here are in units of 10^6 bits/sec */
15096
15097         if (desired_egress_rate == -1)
15098                 return 0; /* shouldn't happen */
15099
15100         if (desired_egress_rate >= current_egress_rate)
15101                 return 0; /* we can't help go faster, only slower */
15102
15103         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15104                         egress_cycles(dw_len * 4, current_egress_rate);
15105
15106         return (u16)delta_cycles;
15107 }
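/*
 * Rough intuition, not a spec statement: egress_cycles() turns a byte count
 * at a given rate into a cycle count, so if the desired static rate is half
 * the current egress rate the packet needs roughly twice the cycles, and the
 * difference returned above is the stall the send engine should insert to
 * pace the flow down.
 */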
15108
15109 /**
15110  * create_pbc - build a pbc for transmission
15111  * @flags: special case flags or-ed in built pbc
15112  * @srate_mbs: static rate, in Mb/s
15113  * @vl: vl
15114  * @dw_len: dword length (header words + data words + pbc words)
15115  *
15116  * Create a PBC with the given flags, rate, VL, and length.
15117  *
15118  * NOTE: The PBC created will not insert any HCRC - all callers but one are
15119  * for verbs, which does not use this PSM feature.  The lone other caller
15120  * is for the diagnostic interface which calls this if the user does not
15121  * supply their own PBC.
15122  */
15123 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15124                u32 dw_len)
15125 {
15126         u64 pbc, delay = 0;
15127
15128         if (unlikely(srate_mbs))
15129                 delay = delay_cycles(ppd, srate_mbs, dw_len);
15130
15131         pbc = flags
15132                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15133                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15134                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15135                 | (dw_len & PBC_LENGTH_DWS_MASK)
15136                         << PBC_LENGTH_DWS_SHIFT;
15137
15138         return pbc;
15139 }
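/*
 * Minimal usage sketch (argument values assumed, not taken from a real
 * caller): build a PBC for a VL0 packet of 32 dwords with no special flags
 * and no static rate pacing:
 *
 *     u64 pbc = create_pbc(ppd, 0, 0, 0, 32);
 *
 * The result packs the delay, PBC_IHCRC_NONE, the VL, and the dword length
 * into their respective PBC fields exactly as shown above.
 */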
15140
15141 #define SBUS_THERMAL    0x4f
15142 #define SBUS_THERM_MONITOR_MODE 0x1
15143
15144 #define THERM_FAILURE(dev, ret, reason) \
15145         dd_dev_err((dd),                                                \
15146                    "Thermal sensor initialization failed: %s (%d)\n",   \
15147                    (reason), (ret))
15148
15149 /*
15150  * Initialize the thermal sensor.
15151  *
15152  * After initialization, enable polling of thermal sensor through
15153  * SBus interface. For this to work, the SBus Master firmware has to
15154  * be loaded, because the HW polling logic uses SBus interrupts, which
15155  * the default firmware does not support. Otherwise, no data will be
15156  * returned through
15157  * the ASIC_STS_THERM CSR.
15158  */
15159 static int thermal_init(struct hfi1_devdata *dd)
15160 {
15161         int ret = 0;
15162
15163         if (dd->icode != ICODE_RTL_SILICON ||
15164             check_chip_resource(dd, CR_THERM_INIT, NULL))
15165                 return ret;
15166
15167         ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15168         if (ret) {
15169                 THERM_FAILURE(dd, ret, "Acquire SBus");
15170                 return ret;
15171         }
15172
15173         dd_dev_info(dd, "Initializing thermal sensor\n");
15174         /* Disable polling of thermal readings */
15175         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15176         msleep(100);
15177         /* Thermal Sensor Initialization */
15178         /*    Step 1: Reset the Thermal SBus Receiver */
15179         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15180                                 RESET_SBUS_RECEIVER, 0);
15181         if (ret) {
15182                 THERM_FAILURE(dd, ret, "Bus Reset");
15183                 goto done;
15184         }
15185         /*    Step 2: Set Reset bit in Thermal block */
15186         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15187                                 WRITE_SBUS_RECEIVER, 0x1);
15188         if (ret) {
15189                 THERM_FAILURE(dd, ret, "Therm Block Reset");
15190                 goto done;
15191         }
15192         /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
15193         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15194                                 WRITE_SBUS_RECEIVER, 0x32);
15195         if (ret) {
15196                 THERM_FAILURE(dd, ret, "Write Clock Div");
15197                 goto done;
15198         }
15199         /*    Step 4: Select temperature mode */
15200         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15201                                 WRITE_SBUS_RECEIVER,
15202                                 SBUS_THERM_MONITOR_MODE);
15203         if (ret) {
15204                 THERM_FAILURE(dd, ret, "Write Mode Sel");
15205                 goto done;
15206         }
15207         /*    Step 5: De-assert block reset and start conversion */
15208         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15209                                 WRITE_SBUS_RECEIVER, 0x2);
15210         if (ret) {
15211                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15212                 goto done;
15213         }
15214         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15215         msleep(22);
15216
15217         /* Enable polling of thermal readings */
15218         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15219
15220         /* Set initialized flag */
15221         ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15222         if (ret)
15223                 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15224
15225 done:
15226         release_chip_resource(dd, CR_SBUS);
15227         return ret;
15228 }
15229
15230 static void handle_temp_err(struct hfi1_devdata *dd)
15231 {
15232         struct hfi1_pportdata *ppd = &dd->pport[0];
15233         /*
15234          * Thermal Critical Interrupt
15235          * Put the device into forced freeze mode, take link down to
15236          * offline, and put DC into reset.
15237          */
15238         dd_dev_emerg(dd,
15239                      "Critical temperature reached! Forcing device into freeze mode!\n");
15240         dd->flags |= HFI1_FORCED_FREEZE;
15241         start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15242         /*
15243          * Shut DC down as much and as quickly as possible.
15244          *
15245          * Step 1: Take the link down to OFFLINE. This will cause the
15246          *         8051 to put the Serdes in reset. However, we don't want to
15247          *         go through the entire link state machine since we want to
15248          *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15249          *         but rather an attempt to save the chip.
15250          *         Code below is almost the same as quiet_serdes() but avoids
15251          *         all the extra work and the sleeps.
15252          */
15253         ppd->driver_link_ready = 0;
15254         ppd->link_enabled = 0;
15255         set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15256                                 PLS_OFFLINE);
15257         /*
15258          * Step 2: Shutdown LCB and 8051
15259          *         After shutdown, do not restore DC_CFG_RESET value.
15260          */
15261         dc_shutdown(dd);
15262 }