1 /*
2  * Copyright(c) 2015 - 2018 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 #include "affinity.h"
67 #include "debugfs.h"
68 #include "fault.h"
69
70 uint kdeth_qp;
71 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75 module_param(num_vls, uint, S_IRUGO);
76 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78 /*
79  * Default time to aggregate two 10K packets from the idle state
80  * (timer not running). The timer starts at the end of the first packet,
81  * so only the time for one 10K packet and header plus a bit extra is needed.
82  * 10 * 1024 + 64 header bytes = 10304 bytes
83  * 10304 bytes / 12.5 GB/s = 824.32 ns
84  */
85 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86 module_param(rcv_intr_timeout, uint, S_IRUGO);
87 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
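/*
 * Illustrative re-derivation of the default above (not driver logic, and
 * assuming the 12.5 GB/s link rate quoted in the comment): an N-byte
 * packet plus a 64-byte header takes (N + 64) / 12.5 ns on the wire, so
 * N = 10240 gives ~824 ns; the extra 16 ns in the default is the
 * interrupt-coalescing allowance noted above.
 */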
88
89 uint rcv_intr_count = 16; /* same as qib */
90 module_param(rcv_intr_count, uint, S_IRUGO);
91 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93 ushort link_crc_mask = SUPPORTED_CRCS;
94 module_param(link_crc_mask, ushort, S_IRUGO);
95 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97 uint loopback;
98 module_param_named(loopback, loopback, uint, S_IRUGO);
99 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101 /* Other driver tunables */
102 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103 static ushort crc_14b_sideband = 1;
104 static uint use_flr = 1;
105 uint quick_linkup; /* skip LNI */
106
107 struct flag_table {
108         u64 flag;       /* the flag */
109         char *str;      /* description string */
110         u16 extra;      /* extra information */
111         u16 unused0;
112         u32 unused1;
113 };
114
115 /* str must be a string constant */
116 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
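/*
 * For illustration, using an entry that appears in the tables below:
 * FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * expands to { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 },
 * filling struct flag_table's .flag, .str and .extra members positionally.
 */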
118
119 /* Send Error Consequences */
120 #define SEC_WRITE_DROPPED       0x1
121 #define SEC_PACKET_DROPPED      0x2
122 #define SEC_SC_HALTED           0x4     /* per-context only */
123 #define SEC_SPC_FREEZE          0x8     /* per-HFI only */
124
125 #define DEFAULT_KRCVQS            2
126 #define MIN_KERNEL_KCTXTS         2
127 #define FIRST_KERNEL_KCTXT        1
128
129 /*
130  * RSM instance allocation
131  *   0 - Verbs
132  *   1 - User Fecn Handling
133  *   2 - Vnic
134  */
135 #define RSM_INS_VERBS             0
136 #define RSM_INS_FECN              1
137 #define RSM_INS_VNIC              2
138
139 /* Bit offset into the GUID which carries HFI id information */
140 #define GUID_HFI_INDEX_SHIFT     39
141
142 /* extract the emulation revision */
143 #define emulator_rev(dd) ((dd)->irev >> 8)
144 /* parallel and serial emulation versions are 3 and 4 respectively */
145 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
146 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
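/*
 * Example with a hypothetical irev value (illustration only): if
 * dd->irev were 0x0504, emulator_rev(dd) would be 0x5 and
 * is_emulator_s(dd) true (low nibble 4 = serial emulation); a low
 * nibble of 3 would indicate parallel emulation instead.
 */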
147
148 /* RSM fields for Verbs */
149 /* packet type */
150 #define IB_PACKET_TYPE         2ull
151 #define QW_SHIFT               6ull
152 /* QPN[7..1] */
153 #define QPN_WIDTH              7ull
154
155 /* LRH.BTH: QW 0, OFFSET 48 - for match */
156 #define LRH_BTH_QW             0ull
157 #define LRH_BTH_BIT_OFFSET     48ull
158 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
159 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
160 #define LRH_BTH_SELECT
161 #define LRH_BTH_MASK           3ull
162 #define LRH_BTH_VALUE          2ull
163
164 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
165 #define LRH_SC_QW              0ull
166 #define LRH_SC_BIT_OFFSET      56ull
167 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
168 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
169 #define LRH_SC_MASK            128ull
170 #define LRH_SC_VALUE           0ull
171
172 /* SC[n..0] QW 0, OFFSET 60 - for select */
173 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
174
175 /* QPN[m+n:1] QW 1, OFFSET 1 */
176 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
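/*
 * The match/select offsets above (and the Vnic ones below) pack a
 * quad-word index into bits 6 and up and a bit offset within that QW
 * into bits 5..0.  Worked examples:
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *   LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */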
177
178 /* RSM fields for Vnic */
179 /* L2_TYPE: QW 0, OFFSET 61 - for match */
180 #define L2_TYPE_QW             0ull
181 #define L2_TYPE_BIT_OFFSET     61ull
182 #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
183 #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
184 #define L2_TYPE_MASK           3ull
185 #define L2_16B_VALUE           2ull
186
187 /* L4_TYPE QW 1, OFFSET 0 - for match */
188 #define L4_TYPE_QW              1ull
189 #define L4_TYPE_BIT_OFFSET      0ull
190 #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
191 #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
192 #define L4_16B_TYPE_MASK        0xFFull
193 #define L4_16B_ETH_VALUE        0x78ull
194
195 /* 16B VESWID - for select */
196 #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
197 /* 16B ENTROPY - for select */
198 #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
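/*
 * Same encoding for the 16B fields, worked out:
 *   L2_TYPE_MATCH_OFFSET     = (0 << 6) | 61 =  61
 *   L4_TYPE_MATCH_OFFSET     = (1 << 6) |  0 =  64
 *   L4_16B_HDR_VESWID_OFFSET = (2 << 6) | 16 = 144
 *   L2_16B_ENTROPY_OFFSET    = (1 << 6) | 32 =  96
 */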
199
200 /* defines to build power on SC2VL table */
201 #define SC2VL_VAL( \
202         num, \
203         sc0, sc0val, \
204         sc1, sc1val, \
205         sc2, sc2val, \
206         sc3, sc3val, \
207         sc4, sc4val, \
208         sc5, sc5val, \
209         sc6, sc6val, \
210         sc7, sc7val) \
211 ( \
212         ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
213         ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
214         ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
215         ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
216         ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
217         ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
218         ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
219         ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
220 )
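/*
 * Hypothetical invocation, for illustration only (the real table values
 * are written elsewhere in this file):
 *   SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1, 4, 2, 5, 2, 6, 3, 7, 3)
 * ORs each SC's VL value, shifted by SEND_SC2VLT0_SC<n>_SHIFT, into one
 * 64-bit CSR value: SC0/1 -> VL0, SC2/3 -> VL1, SC4/5 -> VL2, SC6/7 -> VL3.
 */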
221
222 #define DC_SC_VL_VAL( \
223         range, \
224         e0, e0val, \
225         e1, e1val, \
226         e2, e2val, \
227         e3, e3val, \
228         e4, e4val, \
229         e5, e5val, \
230         e6, e6val, \
231         e7, e7val, \
232         e8, e8val, \
233         e9, e9val, \
234         e10, e10val, \
235         e11, e11val, \
236         e12, e12val, \
237         e13, e13val, \
238         e14, e14val, \
239         e15, e15val) \
240 ( \
241         ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
242         ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
243         ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
244         ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
245         ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
246         ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
247         ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
248         ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
249         ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
250         ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
251         ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
252         ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
253         ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
254         ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
255         ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
256         ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
257 )
258
259 /* all CceStatus sub-block freeze bits */
260 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
261                         | CCE_STATUS_RXE_FROZE_SMASK \
262                         | CCE_STATUS_TXE_FROZE_SMASK \
263                         | CCE_STATUS_TXE_PIO_FROZE_SMASK)
264 /* all CceStatus sub-block TXE pause bits */
265 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
266                         | CCE_STATUS_TXE_PAUSED_SMASK \
267                         | CCE_STATUS_SDMA_PAUSED_SMASK)
268 /* all CceStatus sub-block RXE pause bits */
269 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
270
271 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
272 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
273
274 /*
275  * CCE Error flags.
276  */
277 static struct flag_table cce_err_status_flags[] = {
278 /* 0*/  FLAG_ENTRY0("CceCsrParityErr",
279                 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
280 /* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
281                 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
282 /* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
283                 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
284 /* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
285                 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
286 /* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
287                 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
288 /* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
289                 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
290 /* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
291                 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
292 /* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
293                 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
294 /* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
295                 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
296 /* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
297             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
298 /*10*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
299             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
300 /*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
301             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
302 /*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
303                 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
304 /*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
305                 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
306 /*14*/  FLAG_ENTRY0("PcicRetryMemCorErr",
307                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
308 /*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
309                 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
310 /*16*/  FLAG_ENTRY0("PcicPostHdQCorErr",
311                 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
312 /*17*/  FLAG_ENTRY0("PcicPostHdQCorErr",
313                 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
314 /*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
315                 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
316 /*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
317                 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
318 /*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
319                 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
320 /*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
321                 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
322 /*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
323                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
324 /*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
325                 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
326 /*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
327                 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
328 /*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
329                 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
330 /*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
331                 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
332 /*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
333                 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
334 /*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
335                 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
336 /*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
337                 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
338 /*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
339                 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
340 /*31*/  FLAG_ENTRY0("LATriggered",
341                 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
342 /*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
343                 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
344 /*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
345                 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
346 /*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
347                 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
348 /*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
349                 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
350 /*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
351                 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
352 /*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
353                 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
354 /*38*/  FLAG_ENTRY0("CceIntMapCorErr",
355                 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
356 /*39*/  FLAG_ENTRY0("CceIntMapUncErr",
357                 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
358 /*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
359                 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
360 /*41-63 reserved*/
361 };
362
363 /*
364  * Misc Error flags
365  */
366 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
367 static struct flag_table misc_err_status_flags[] = {
368 /* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
369 /* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
370 /* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
371 /* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
372 /* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
373 /* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
374 /* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
375 /* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
376 /* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
377 /* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
378 /*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
379 /*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
380 /*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
381 };
382
383 /*
384  * TXE PIO Error flags and consequences
385  */
386 static struct flag_table pio_err_status_flags[] = {
387 /* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
388         SEC_WRITE_DROPPED,
389         SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
390 /* 1*/  FLAG_ENTRY("PioWriteAddrParity",
391         SEC_SPC_FREEZE,
392         SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
393 /* 2*/  FLAG_ENTRY("PioCsrParity",
394         SEC_SPC_FREEZE,
395         SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
396 /* 3*/  FLAG_ENTRY("PioSbMemFifo0",
397         SEC_SPC_FREEZE,
398         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
399 /* 4*/  FLAG_ENTRY("PioSbMemFifo1",
400         SEC_SPC_FREEZE,
401         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
402 /* 5*/  FLAG_ENTRY("PioPccFifoParity",
403         SEC_SPC_FREEZE,
404         SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
405 /* 6*/  FLAG_ENTRY("PioPecFifoParity",
406         SEC_SPC_FREEZE,
407         SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
408 /* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
409         SEC_SPC_FREEZE,
410         SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
411 /* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
412         SEC_SPC_FREEZE,
413         SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
414 /* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
415         SEC_SPC_FREEZE,
416         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
417 /*10*/  FLAG_ENTRY("PioSmPktResetParity",
418         SEC_SPC_FREEZE,
419         SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
420 /*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
421         SEC_SPC_FREEZE,
422         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
423 /*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
424         SEC_SPC_FREEZE,
425         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
426 /*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
427         0,
428         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
429 /*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
430         0,
431         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
432 /*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
433         SEC_SPC_FREEZE,
434         SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
435 /*16*/  FLAG_ENTRY("PioPpmcPblFifo",
436         SEC_SPC_FREEZE,
437         SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
438 /*17*/  FLAG_ENTRY("PioInitSmIn",
439         0,
440         SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
441 /*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
442         SEC_SPC_FREEZE,
443         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
444 /*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
445         SEC_SPC_FREEZE,
446         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
447 /*20*/  FLAG_ENTRY("PioHostAddrMemCor",
448         0,
449         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
450 /*21*/  FLAG_ENTRY("PioWriteDataParity",
451         SEC_SPC_FREEZE,
452         SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
453 /*22*/  FLAG_ENTRY("PioStateMachine",
454         SEC_SPC_FREEZE,
455         SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
456 /*23*/  FLAG_ENTRY("PioWriteQwValidParity",
457         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
458         SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
459 /*24*/  FLAG_ENTRY("PioBlockQwCountParity",
460         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
461         SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
462 /*25*/  FLAG_ENTRY("PioVlfVlLenParity",
463         SEC_SPC_FREEZE,
464         SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
465 /*26*/  FLAG_ENTRY("PioVlfSopParity",
466         SEC_SPC_FREEZE,
467         SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
468 /*27*/  FLAG_ENTRY("PioVlFifoParity",
469         SEC_SPC_FREEZE,
470         SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
471 /*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
472         SEC_SPC_FREEZE,
473         SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
474 /*29*/  FLAG_ENTRY("PioPpmcSopLen",
475         SEC_SPC_FREEZE,
476         SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
477 /*30-31 reserved*/
478 /*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
479         SEC_SPC_FREEZE,
480         SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
481 /*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
482         SEC_SPC_FREEZE,
483         SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
484 /*34*/  FLAG_ENTRY("PioPccSopHeadParity",
485         SEC_SPC_FREEZE,
486         SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
487 /*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
488         SEC_SPC_FREEZE,
489         SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
490 /*36-63 reserved*/
491 };
492
493 /* TXE PIO errors that cause an SPC freeze */
494 #define ALL_PIO_FREEZE_ERR \
495         (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
496         | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
497         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
498         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
499         | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
500         | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
501         | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
502         | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
503         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
504         | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
505         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
506         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
507         | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
508         | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
509         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
510         | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
511         | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
512         | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
513         | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
514         | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
515         | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
516         | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
517         | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
518         | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
519         | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
520         | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
521         | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
522         | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
523         | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
524
525 /*
526  * TXE SDMA Error flags
527  */
528 static struct flag_table sdma_err_status_flags[] = {
529 /* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
530                 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
531 /* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
532                 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
533 /* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
534                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
535 /* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
536                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
537 /*04-63 reserved*/
538 };
539
540 /* TXE SDMA errors that cause an SPC freeze */
541 #define ALL_SDMA_FREEZE_ERR  \
542                 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
543                 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
544                 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
545
546 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
547 #define PORT_DISCARD_EGRESS_ERRS \
548         (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
549         | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
550         | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
551
552 /*
553  * TXE Egress Error flags
554  */
555 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
556 static struct flag_table egress_err_status_flags[] = {
557 /* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
558 /* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
559 /* 2 reserved */
560 /* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
561                 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
562 /* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
563 /* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
564 /* 6 reserved */
565 /* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
566                 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
567 /* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
568                 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
569 /* 9-10 reserved */
570 /*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
571                 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
572 /*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
573 /*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
574 /*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
575 /*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
576 /*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
577                 SEES(TX_SDMA0_DISALLOWED_PACKET)),
578 /*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
579                 SEES(TX_SDMA1_DISALLOWED_PACKET)),
580 /*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
581                 SEES(TX_SDMA2_DISALLOWED_PACKET)),
582 /*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
583                 SEES(TX_SDMA3_DISALLOWED_PACKET)),
584 /*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
585                 SEES(TX_SDMA4_DISALLOWED_PACKET)),
586 /*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
587                 SEES(TX_SDMA5_DISALLOWED_PACKET)),
588 /*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
589                 SEES(TX_SDMA6_DISALLOWED_PACKET)),
590 /*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
591                 SEES(TX_SDMA7_DISALLOWED_PACKET)),
592 /*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
593                 SEES(TX_SDMA8_DISALLOWED_PACKET)),
594 /*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
595                 SEES(TX_SDMA9_DISALLOWED_PACKET)),
596 /*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
597                 SEES(TX_SDMA10_DISALLOWED_PACKET)),
598 /*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
599                 SEES(TX_SDMA11_DISALLOWED_PACKET)),
600 /*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
601                 SEES(TX_SDMA12_DISALLOWED_PACKET)),
602 /*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
603                 SEES(TX_SDMA13_DISALLOWED_PACKET)),
604 /*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
605                 SEES(TX_SDMA14_DISALLOWED_PACKET)),
606 /*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
607                 SEES(TX_SDMA15_DISALLOWED_PACKET)),
608 /*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
609                 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
610 /*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
611                 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
612 /*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
613                 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
614 /*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
615                 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
616 /*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
617                 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
618 /*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
619                 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
620 /*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
621                 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
622 /*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
623                 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
624 /*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
625                 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
626 /*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
627 /*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
628 /*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
629 /*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
630 /*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
631 /*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
632 /*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
633 /*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
634 /*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
635 /*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
636 /*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
637 /*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
638 /*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
639 /*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
640 /*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
641 /*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
642 /*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
643 /*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
644 /*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
645 /*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
646 /*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
647 /*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
648                 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
649 /*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
650                 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
651 };
652
653 /*
654  * TXE Egress Error Info flags
655  */
656 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
657 static struct flag_table egress_err_info_flags[] = {
658 /* 0*/  FLAG_ENTRY0("Reserved", 0ull),
659 /* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
660 /* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
661 /* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
662 /* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
663 /* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
664 /* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
665 /* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
666 /* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
667 /* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
668 /*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
669 /*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
670 /*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
671 /*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
672 /*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
673 /*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
674 /*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
675 /*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
676 /*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
677 /*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
678 /*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
679 /*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
680 };
681
682 /* TXE Egress errors that cause an SPC freeze */
683 #define ALL_TXE_EGRESS_FREEZE_ERR \
684         (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
685         | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
686         | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
687         | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
688         | SEES(TX_LAUNCH_CSR_PARITY) \
689         | SEES(TX_SBRD_CTL_CSR_PARITY) \
690         | SEES(TX_CONFIG_PARITY) \
691         | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
692         | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
693         | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
694         | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
695         | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
696         | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
697         | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
698         | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
699         | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
700         | SEES(TX_CREDIT_RETURN_PARITY))
701
702 /*
703  * TXE Send error flags
704  */
705 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
706 static struct flag_table send_err_status_flags[] = {
707 /* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
708 /* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
709 /* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
710 };
711
712 /*
713  * TXE Send Context Error flags and consequences
714  */
715 static struct flag_table sc_err_status_flags[] = {
716 /* 0*/  FLAG_ENTRY("InconsistentSop",
717                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
718                 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
719 /* 1*/  FLAG_ENTRY("DisallowedPacket",
720                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
721                 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
722 /* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
723                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
724                 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
725 /* 3*/  FLAG_ENTRY("WriteOverflow",
726                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
727                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
728 /* 4*/  FLAG_ENTRY("WriteOutOfBounds",
729                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
730                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
731 /* 5-63 reserved*/
732 };
733
734 /*
735  * RXE Receive Error flags
736  */
737 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
738 static struct flag_table rxe_err_status_flags[] = {
739 /* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
740 /* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
741 /* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
742 /* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
743 /* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
744 /* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
745 /* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
746 /* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
747 /* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
748 /* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
749 /*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
750 /*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
751 /*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
752 /*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
753 /*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
754 /*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
755 /*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
756                 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
757 /*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
758 /*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
759 /*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
760                 RXES(RBUF_BLOCK_LIST_READ_UNC)),
761 /*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
762                 RXES(RBUF_BLOCK_LIST_READ_COR)),
763 /*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
764                 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
765 /*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
766                 RXES(RBUF_CSR_QENT_CNT_PARITY)),
767 /*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
768                 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
769 /*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
770                 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
771 /*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
772 /*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
773 /*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
774                 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
775 /*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
776 /*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
777 /*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
778 /*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
779 /*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
780 /*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
781 /*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
782 /*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
783                 RXES(RBUF_FL_INITDONE_PARITY)),
784 /*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
785                 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
786 /*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
787 /*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
788 /*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
789 /*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
790                 RXES(LOOKUP_DES_PART1_UNC_COR)),
791 /*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
792                 RXES(LOOKUP_DES_PART2_PARITY)),
793 /*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
794 /*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
795 /*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
796 /*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
797 /*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
798 /*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
799 /*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
800 /*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
801 /*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
802 /*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
803 /*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
804 /*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
805 /*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
806 /*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
807 /*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
808 /*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
809 /*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
810 /*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
811 /*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
812 /*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
813 /*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
814 /*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
815 };
816
817 /* RXE errors that will trigger an SPC freeze */
818 #define ALL_RXE_FREEZE_ERR  \
819         (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
820         | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
821         | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
822         | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
823         | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
824         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
825         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
826         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
827         | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
828         | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
829         | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
830         | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
831         | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
832         | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
833         | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
834         | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
835         | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
836         | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
837         | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
838         | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
839         | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
840         | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
841         | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
842         | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
843         | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
844         | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
845         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
846         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
847         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
848         | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
849         | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
850         | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
851         | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
852         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
853         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
854         | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
855         | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
856         | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
857         | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
858         | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
859         | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
860         | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
861         | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
862         | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
863
864 #define RXE_FREEZE_ABORT_MASK \
865         (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
866         RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
867         RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
868
869 /*
870  * DCC Error Flags
871  */
872 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
873 static struct flag_table dcc_err_flags[] = {
874         FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
875         FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
876         FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
877         FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
878         FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
879         FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
880         FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
881         FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
882         FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
883         FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
884         FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
885         FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
886         FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
887         FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
888         FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
889         FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
890         FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
891         FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
892         FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
893         FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
894         FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
895         FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
896         FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
897         FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
898         FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
899         FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
900         FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
901         FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
902         FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
903         FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
904         FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
905         FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
906         FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
907         FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
908         FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
909         FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
910         FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
911         FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
912         FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
913         FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
914         FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
915         FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
916         FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
917         FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
918         FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
919         FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
920 };
921
922 /*
923  * LCB error flags
924  */
925 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
926 static struct flag_table lcb_err_flags[] = {
927 /* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
928 /* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
929 /* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
930 /* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
931                 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
932 /* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
933 /* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
934 /* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
935 /* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
936 /* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
937 /* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
938 /*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
939 /*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
940 /*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
941 /*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
942                 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
943 /*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
944 /*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
945 /*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
946 /*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
947 /*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
948 /*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
949                 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
950 /*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
951 /*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
952 /*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
953 /*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
954 /*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
955 /*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
956 /*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
957                 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
958 /*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
959 /*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
960                 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
961 /*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
962                 LCBE(REDUNDANT_FLIT_PARITY_ERR))
963 };
964
965 /*
966  * DC8051 Error Flags
967  */
968 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
969 static struct flag_table dc8051_err_flags[] = {
970         FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
971         FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
972         FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
973         FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
974         FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
975         FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
976         FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
977         FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
978         FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
979                     D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
980         FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
981 };
982
983 /*
984  * DC8051 Information Error flags
985  *
986  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
987  */
988 static struct flag_table dc8051_info_err_flags[] = {
989         FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
990         FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
991         FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
992         FLAG_ENTRY0("Serdes internal loopback failure",
993                     FAILED_SERDES_INTERNAL_LOOPBACK),
994         FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
995         FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
996         FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
997         FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
998         FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
999         FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1000         FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1001         FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1002         FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1003         FLAG_ENTRY0("External Device Request Timeout",
1004                     EXTERNAL_DEVICE_REQ_TIMEOUT),
1005 };
1006
1007 /*
1008  * DC8051 Information Host Information flags
1009  *
1010  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1011  */
1012 static struct flag_table dc8051_info_host_msg_flags[] = {
1013         FLAG_ENTRY0("Host request done", 0x0001),
1014         FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1015         FLAG_ENTRY0("BC SMA message", 0x0004),
1016         FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1017         FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1018         FLAG_ENTRY0("External device config request", 0x0020),
1019         FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1020         FLAG_ENTRY0("LinkUp achieved", 0x0080),
1021         FLAG_ENTRY0("Link going down", 0x0100),
1022         FLAG_ENTRY0("Link width downgraded", 0x0200),
1023 };
1024
1025 static u32 encoded_size(u32 size);
1026 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1027 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1028 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1029                                u8 *continuous);
1030 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1031                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1032 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1033                                       u8 *remote_tx_rate, u16 *link_widths);
1034 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1035                                     u8 *flag_bits, u16 *link_widths);
1036 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1037                                   u8 *device_rev);
1038 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1039 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1040                             u8 *tx_polarity_inversion,
1041                             u8 *rx_polarity_inversion, u8 *max_rate);
1042 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1043                                 unsigned int context, u64 err_status);
1044 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1045 static void handle_dcc_err(struct hfi1_devdata *dd,
1046                            unsigned int context, u64 err_status);
1047 static void handle_lcb_err(struct hfi1_devdata *dd,
1048                            unsigned int context, u64 err_status);
1049 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1050 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1051 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057 static void set_partition_keys(struct hfi1_pportdata *ppd);
1058 static const char *link_state_name(u32 state);
1059 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1060                                           u32 state);
1061 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1062                            u64 *out_data);
1063 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1064 static int thermal_init(struct hfi1_devdata *dd);
1065
1066 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1067 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1068                                             int msecs);
1069 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070                                   int msecs);
1071 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1072 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1073 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1074                                    int msecs);
1075 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1076                                          int msecs);
1077 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079 static void handle_temp_err(struct hfi1_devdata *dd);
1080 static void dc_shutdown(struct hfi1_devdata *dd);
1081 static void dc_start(struct hfi1_devdata *dd);
1082 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083                            unsigned int *np);
1084 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1088
1089 /*
1090  * Error interrupt table entry.  This is used as input to the interrupt
1091  * "clear down" routine used for all second tier error interrupt register.
1092  * Second tier interrupt registers have a single bit representing them
1093  * in the top-level CceIntStatus.
1094  */
1095 struct err_reg_info {
1096         u32 status;             /* status CSR offset */
1097         u32 clear;              /* clear CSR offset */
1098         u32 mask;               /* mask CSR offset */
1099         void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1100         const char *desc;
1101 };
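
/*
 * A minimal sketch (not the driver's exact routine) of how such an entry
 * might be consumed: read the second-tier status CSR, write the same bits
 * back to the clear CSR, then pass the value to the per-register handler.
 * The helper name below is illustrative only.
 *
 *	static void example_clear_down(struct hfi1_devdata *dd, u32 source,
 *				       const struct err_reg_info *eri)
 *	{
 *		u64 reg = read_csr(dd, eri->status);
 *
 *		if (reg) {
 *			write_csr(dd, eri->clear, reg);
 *			if (eri->handler)
 *				eri->handler(dd, source, reg);
 *		}
 *	}
 */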
1102
1103 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1104 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1105 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
1106
1107 /*
1108  * Helpers for building HFI and DC error interrupt table entries.  Different
1109  * helpers are needed because of inconsistent register names.
1110  */
1111 #define EE(reg, handler, desc) \
1112         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1113                 handler, desc }
1114 #define DC_EE1(reg, handler, desc) \
1115         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1116 #define DC_EE2(reg, handler, desc) \
1117         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
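
/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" }
 *
 * while DC_EE1/DC_EE2 paste the _FLG/_FLG_CLR/_FLG_EN and _FLG/_CLR/_EN
 * register name styles used by the DC blocks.
 */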
1118
1119 /*
1120  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1121  * another register containing more information.
1122  */
1123 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1124 /* 0*/  EE(CCE_ERR,             handle_cce_err,    "CceErr"),
1125 /* 1*/  EE(RCV_ERR,             handle_rxe_err,    "RxeErr"),
1126 /* 2*/  EE(MISC_ERR,    handle_misc_err,   "MiscErr"),
1127 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1128 /* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1129 /* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1130 /* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1131 /* 7*/  EE(SEND_ERR,    handle_txe_err,    "TxeErr")
1132         /* the rest are reserved */
1133 };
1134
1135 /*
1136  * Index into the Various section of the interrupt sources
1137  * corresponding to the Critical Temperature interrupt.
1138  */
1139 #define TCRIT_INT_SOURCE 4
1140
1141 /*
1142  * SDMA error interrupt entry - refers to another register containing more
1143  * information.
1144  */
1145 static const struct err_reg_info sdma_eng_err =
1146         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1147
1148 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1149 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1150 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1151 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1152 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1153 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1154         /* rest are reserved */
1155 };
1156
1157 /*
1158  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1159  * register cannot be derived from the MTU value because 10K is not
1160  * a power of 2. Therefore, we need a constant. Everything else can
1161  * be calculated.
1162  */
1163 #define DCC_CFG_PORT_MTU_CAP_10240 7
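
/*
 * For the power-of-2 MTUs the encoding can be computed (assuming the usual
 * IB/OPA scheme where 256 -> 1, 512 -> 2, ..., i.e. cap = ilog2(MTU / 128)):
 * 2048 -> 4, 4096 -> 5, 8192 -> 6.  But 10240 / 128 = 80 is not a power of
 * 2, so the 10K encoding (7) must be spelled out as the constant above.
 */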
1164
1165 /*
1166  * Table of the DC grouping of error interrupts.  Each entry refers to
1167  * another register containing more information.
1168  */
1169 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1170 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1171 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1172 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1173 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1174         /* the rest are reserved */
1175 };
1176
1177 struct cntr_entry {
1178         /*
1179          * counter name
1180          */
1181         char *name;
1182
1183         /*
1184          * csr to read for name (if applicable)
1185          */
1186         u64 csr;
1187
1188         /*
1189          * offset into dd or ppd to store the counter's value
1190          */
1191         int offset;
1192
1193         /*
1194          * flags
1195          */
1196         u8 flags;
1197
1198         /*
1199          * accessor for stat element, context either dd or ppd
1200          */
1201         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1202                        int mode, u64 data);
1203 };
1204
1205 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1206 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1207
1208 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1209 { \
1210         name, \
1211         csr, \
1212         offset, \
1213         flags, \
1214         accessor \
1215 }
1216
1217 /* 32bit RXE */
1218 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1219 CNTR_ELEM(#name, \
1220           (counter * 8 + RCV_COUNTER_ARRAY32), \
1221           0, flags | CNTR_32BIT, \
1222           port_access_u32_csr)
1223
1224 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1225 CNTR_ELEM(#name, \
1226           (counter * 8 + RCV_COUNTER_ARRAY32), \
1227           0, flags | CNTR_32BIT, \
1228           dev_access_u32_csr)
1229
1230 /* 64bit RXE */
1231 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1232 CNTR_ELEM(#name, \
1233           (counter * 8 + RCV_COUNTER_ARRAY64), \
1234           0, flags, \
1235           port_access_u64_csr)
1236
1237 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239           (counter * 8 + RCV_COUNTER_ARRAY64), \
1240           0, flags, \
1241           dev_access_u64_csr)
1242
1243 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1244 #define OVR_ELM(ctx) \
1245 CNTR_ELEM("RcvHdrOvr" #ctx, \
1246           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1247           0, CNTR_NORMAL, port_access_u64_csr)
1248
1249 /* 32bit TXE */
1250 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1251 CNTR_ELEM(#name, \
1252           (counter * 8 + SEND_COUNTER_ARRAY32), \
1253           0, flags | CNTR_32BIT, \
1254           port_access_u32_csr)
1255
1256 /* 64bit TXE */
1257 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1258 CNTR_ELEM(#name, \
1259           (counter * 8 + SEND_COUNTER_ARRAY64), \
1260           0, flags, \
1261           port_access_u64_csr)
1262
1263 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1264 CNTR_ELEM(#name, \
1265           counter * 8 + SEND_COUNTER_ARRAY64, \
1266           0, \
1267           flags, \
1268           dev_access_u64_csr)
1269
1270 /* CCE */
1271 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1272 CNTR_ELEM(#name, \
1273           (counter * 8 + CCE_COUNTER_ARRAY32), \
1274           0, flags | CNTR_32BIT, \
1275           dev_access_u32_csr)
1276
1277 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1278 CNTR_ELEM(#name, \
1279           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1280           0, flags | CNTR_32BIT, \
1281           dev_access_u32_csr)
1282
1283 /* DC */
1284 #define DC_PERF_CNTR(name, counter, flags) \
1285 CNTR_ELEM(#name, \
1286           counter, \
1287           0, \
1288           flags, \
1289           dev_access_u64_csr)
1290
1291 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1292 CNTR_ELEM(#name, \
1293           counter, \
1294           0, \
1295           flags, \
1296           dc_access_lcb_cntr)
1297
1298 /* ibp counters */
1299 #define SW_IBP_CNTR(name, cntr) \
1300 CNTR_ELEM(#name, \
1301           0, \
1302           0, \
1303           CNTR_SYNTH, \
1304           access_ibp_##cntr)
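
/*
 * As an illustration of how these helpers fill in struct cntr_entry,
 * RXE32_DEV_CNTR_ELEM(FooCnt, 3, CNTR_NORMAL) (hypothetical name and
 * index) would expand to
 *
 *	{ "FooCnt", (3 * 8 + RCV_COUNTER_ARRAY32), 0,
 *	  CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. a 32-bit device counter read via dev_access_u32_csr() from the
 * fourth 8-byte slot of the RXE 32-bit counter array.
 */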
1305
1306 /**
1307  * hfi1_addr_from_offset - return addr for readq/writeq
1308  * @dd: the dd device
1309  * @offset: the offset of the CSR within bar0
1310  *
1311  * This routine selects the appropriate base address
1312  * based on the indicated offset.
1313  */
1314 static inline void __iomem *hfi1_addr_from_offset(
1315         const struct hfi1_devdata *dd,
1316         u32 offset)
1317 {
1318         if (offset >= dd->base2_start)
1319                 return dd->kregbase2 + (offset - dd->base2_start);
1320         return dd->kregbase1 + offset;
1321 }
1322
1323 /**
1324  * read_csr - read CSR at the indicated offset
1325  * @dd: the dd device
1326  * @offset: the offset of the CSR within bar0
1327  *
1328  * Return: the value read or all FF's if there
1329  * is no mapping
1330  */
1331 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1332 {
1333         if (dd->flags & HFI1_PRESENT)
1334                 return readq(hfi1_addr_from_offset(dd, offset));
1335         return -1;
1336 }
1337
1338 /**
1339  * write_csr - write CSR at the indicated offset
1340  * @dd: the dd device
1341  * @offset: the offset of the CSR within bar0
1342  * @value: value to write
1343  */
1344 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1345 {
1346         if (dd->flags & HFI1_PRESENT) {
1347                 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1348
1349                 /* avoid write to RcvArray */
1350                 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1351                         return;
1352                 writeq(value, base);
1353         }
1354 }
1355
1356 /**
1357  * get_csr_addr - return the iomem address for offset
1358  * @dd: the dd device
1359  * @offset: the offset of the CSR within bar0
1360  *
1361  * Return: The iomem address to use in subsequent
1362  * writeq/readq operations.
1363  */
1364 void __iomem *get_csr_addr(
1365         const struct hfi1_devdata *dd,
1366         u32 offset)
1367 {
1368         if (dd->flags & HFI1_PRESENT)
1369                 return hfi1_addr_from_offset(dd, offset);
1370         return NULL;
1371 }
1372
1373 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1374                                  int mode, u64 value)
1375 {
1376         u64 ret;
1377
1378         if (mode == CNTR_MODE_R) {
1379                 ret = read_csr(dd, csr);
1380         } else if (mode == CNTR_MODE_W) {
1381                 write_csr(dd, csr, value);
1382                 ret = value;
1383         } else {
1384                 dd_dev_err(dd, "Invalid cntr register access mode");
1385                 return 0;
1386         }
1387
1388         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1389         return ret;
1390 }
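
/*
 * A sketch of how the accessors below drive this helper: a read pass uses
 * CNTR_MODE_R (the data argument is ignored), a write pass uses CNTR_MODE_W
 * and returns the value just written -- typically 0 to clear a counter.
 *
 *	val = read_write_csr(dd, csr, CNTR_MODE_R, 0);
 *	read_write_csr(dd, csr, CNTR_MODE_W, 0);
 */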
1391
1392 /* Dev Access */
1393 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1394                               void *context, int vl, int mode, u64 data)
1395 {
1396         struct hfi1_devdata *dd = context;
1397         u64 csr = entry->csr;
1398
1399         if (entry->flags & CNTR_SDMA) {
1400                 if (vl == CNTR_INVALID_VL)
1401                         return 0;
1402                 csr += 0x100 * vl;
1403         } else {
1404                 if (vl != CNTR_INVALID_VL)
1405                         return 0;
1406         }
1407         return read_write_csr(dd, csr, mode, data);
1408 }
1409
1410 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1411                               void *context, int idx, int mode, u64 data)
1412 {
1413         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1414
1415         if (dd->per_sdma && idx < dd->num_sdma)
1416                 return dd->per_sdma[idx].err_cnt;
1417         return 0;
1418 }
1419
1420 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1421                               void *context, int idx, int mode, u64 data)
1422 {
1423         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1424
1425         if (dd->per_sdma && idx < dd->num_sdma)
1426                 return dd->per_sdma[idx].sdma_int_cnt;
1427         return 0;
1428 }
1429
1430 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1431                                    void *context, int idx, int mode, u64 data)
1432 {
1433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1434
1435         if (dd->per_sdma && idx < dd->num_sdma)
1436                 return dd->per_sdma[idx].idle_int_cnt;
1437         return 0;
1438 }
1439
1440 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1441                                        void *context, int idx, int mode,
1442                                        u64 data)
1443 {
1444         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1445
1446         if (dd->per_sdma && idx < dd->num_sdma)
1447                 return dd->per_sdma[idx].progress_int_cnt;
1448         return 0;
1449 }
1450
1451 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1452                               int vl, int mode, u64 data)
1453 {
1454         struct hfi1_devdata *dd = context;
1455
1456         u64 val = 0;
1457         u64 csr = entry->csr;
1458
1459         if (entry->flags & CNTR_VL) {
1460                 if (vl == CNTR_INVALID_VL)
1461                         return 0;
1462                 csr += 8 * vl;
1463         } else {
1464                 if (vl != CNTR_INVALID_VL)
1465                         return 0;
1466         }
1467
1468         val = read_write_csr(dd, csr, mode, data);
1469         return val;
1470 }
1471
1472 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1473                               int vl, int mode, u64 data)
1474 {
1475         struct hfi1_devdata *dd = context;
1476         u32 csr = entry->csr;
1477         int ret = 0;
1478
1479         if (vl != CNTR_INVALID_VL)
1480                 return 0;
1481         if (mode == CNTR_MODE_R)
1482                 ret = read_lcb_csr(dd, csr, &data);
1483         else if (mode == CNTR_MODE_W)
1484                 ret = write_lcb_csr(dd, csr, data);
1485
1486         if (ret) {
1487                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1488                 return 0;
1489         }
1490
1491         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1492         return data;
1493 }
1494
1495 /* Port Access */
1496 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1497                                int vl, int mode, u64 data)
1498 {
1499         struct hfi1_pportdata *ppd = context;
1500
1501         if (vl != CNTR_INVALID_VL)
1502                 return 0;
1503         return read_write_csr(ppd->dd, entry->csr, mode, data);
1504 }
1505
1506 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1507                                void *context, int vl, int mode, u64 data)
1508 {
1509         struct hfi1_pportdata *ppd = context;
1510         u64 val;
1511         u64 csr = entry->csr;
1512
1513         if (entry->flags & CNTR_VL) {
1514                 if (vl == CNTR_INVALID_VL)
1515                         return 0;
1516                 csr += 8 * vl;
1517         } else {
1518                 if (vl != CNTR_INVALID_VL)
1519                         return 0;
1520         }
1521         val = read_write_csr(ppd->dd, csr, mode, data);
1522         return val;
1523 }
1524
1525 /* Software defined */
1526 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1527                                 u64 data)
1528 {
1529         u64 ret;
1530
1531         if (mode == CNTR_MODE_R) {
1532                 ret = *cntr;
1533         } else if (mode == CNTR_MODE_W) {
1534                 *cntr = data;
1535                 ret = data;
1536         } else {
1537                 dd_dev_err(dd, "Invalid cntr sw access mode");
1538                 return 0;
1539         }
1540
1541         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1542
1543         return ret;
1544 }
1545
1546 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1547                                  int vl, int mode, u64 data)
1548 {
1549         struct hfi1_pportdata *ppd = context;
1550
1551         if (vl != CNTR_INVALID_VL)
1552                 return 0;
1553         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1554 }
1555
1556 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1557                                  int vl, int mode, u64 data)
1558 {
1559         struct hfi1_pportdata *ppd = context;
1560
1561         if (vl != CNTR_INVALID_VL)
1562                 return 0;
1563         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1564 }
1565
1566 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1567                                        void *context, int vl, int mode,
1568                                        u64 data)
1569 {
1570         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1571
1572         if (vl != CNTR_INVALID_VL)
1573                 return 0;
1574         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1575 }
1576
1577 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1578                                    void *context, int vl, int mode, u64 data)
1579 {
1580         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1581         u64 zero = 0;
1582         u64 *counter;
1583
1584         if (vl == CNTR_INVALID_VL)
1585                 counter = &ppd->port_xmit_discards;
1586         else if (vl >= 0 && vl < C_VL_COUNT)
1587                 counter = &ppd->port_xmit_discards_vl[vl];
1588         else
1589                 counter = &zero;
1590
1591         return read_write_sw(ppd->dd, counter, mode, data);
1592 }
1593
1594 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1595                                        void *context, int vl, int mode,
1596                                        u64 data)
1597 {
1598         struct hfi1_pportdata *ppd = context;
1599
1600         if (vl != CNTR_INVALID_VL)
1601                 return 0;
1602
1603         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1604                              mode, data);
1605 }
1606
1607 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1608                                       void *context, int vl, int mode, u64 data)
1609 {
1610         struct hfi1_pportdata *ppd = context;
1611
1612         if (vl != CNTR_INVALID_VL)
1613                 return 0;
1614
1615         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1616                              mode, data);
1617 }
1618
1619 u64 get_all_cpu_total(u64 __percpu *cntr)
1620 {
1621         int cpu;
1622         u64 counter = 0;
1623
1624         for_each_possible_cpu(cpu)
1625                 counter += *per_cpu_ptr(cntr, cpu);
1626         return counter;
1627 }
1628
1629 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1630                           u64 __percpu *cntr,
1631                           int vl, int mode, u64 data)
1632 {
1633         u64 ret = 0;
1634
1635         if (vl != CNTR_INVALID_VL)
1636                 return 0;
1637
1638         if (mode == CNTR_MODE_R) {
1639                 ret = get_all_cpu_total(cntr) - *z_val;
1640         } else if (mode == CNTR_MODE_W) {
1641                 /* A write can only zero the counter */
1642                 if (data == 0)
1643                         *z_val = get_all_cpu_total(cntr);
1644                 else
1645                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1646         } else {
1647                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1648                 return 0;
1649         }
1650
1651         return ret;
1652 }
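
/*
 * Per-CPU counters are therefore "zeroed" without touching the per-CPU
 * variables themselves: a write of 0 snapshots the current total into
 * *z_val, and later reads report the delta.  A sketch of both passes:
 *
 *	total = read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *			       CNTR_INVALID_VL, CNTR_MODE_R, 0);
 *	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *		       CNTR_INVALID_VL, CNTR_MODE_W, 0);
 */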
1653
1654 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1655                               void *context, int vl, int mode, u64 data)
1656 {
1657         struct hfi1_devdata *dd = context;
1658
1659         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1660                               mode, data);
1661 }
1662
1663 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1664                                    void *context, int vl, int mode, u64 data)
1665 {
1666         struct hfi1_devdata *dd = context;
1667
1668         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1669                               mode, data);
1670 }
1671
1672 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1673                               void *context, int vl, int mode, u64 data)
1674 {
1675         struct hfi1_devdata *dd = context;
1676
1677         return dd->verbs_dev.n_piowait;
1678 }
1679
1680 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1681                                void *context, int vl, int mode, u64 data)
1682 {
1683         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685         return dd->verbs_dev.n_piodrain;
1686 }
1687
1688 static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
1689                                    void *context, int vl, int mode, u64 data)
1690 {
1691         struct hfi1_devdata *dd = context;
1692
1693         return dd->ctx0_seq_drop;
1694 }
1695
1696 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1697                               void *context, int vl, int mode, u64 data)
1698 {
1699         struct hfi1_devdata *dd = context;
1700
1701         return dd->verbs_dev.n_txwait;
1702 }
1703
1704 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1705                                void *context, int vl, int mode, u64 data)
1706 {
1707         struct hfi1_devdata *dd = context;
1708
1709         return dd->verbs_dev.n_kmem_wait;
1710 }
1711
1712 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1713                                    void *context, int vl, int mode, u64 data)
1714 {
1715         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1716
1717         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1718                               mode, data);
1719 }
1720
1721 /* Software counters for the error status bits within MISC_ERR_STATUS */
1722 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1723                                              void *context, int vl, int mode,
1724                                              u64 data)
1725 {
1726         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1727
1728         return dd->misc_err_status_cnt[12];
1729 }
1730
1731 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1732                                           void *context, int vl, int mode,
1733                                           u64 data)
1734 {
1735         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1736
1737         return dd->misc_err_status_cnt[11];
1738 }
1739
1740 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1741                                                void *context, int vl, int mode,
1742                                                u64 data)
1743 {
1744         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1745
1746         return dd->misc_err_status_cnt[10];
1747 }
1748
1749 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1750                                                  void *context, int vl,
1751                                                  int mode, u64 data)
1752 {
1753         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754
1755         return dd->misc_err_status_cnt[9];
1756 }
1757
1758 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1759                                            void *context, int vl, int mode,
1760                                            u64 data)
1761 {
1762         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1763
1764         return dd->misc_err_status_cnt[8];
1765 }
1766
1767 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1768                                 const struct cntr_entry *entry,
1769                                 void *context, int vl, int mode, u64 data)
1770 {
1771         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1772
1773         return dd->misc_err_status_cnt[7];
1774 }
1775
1776 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1777                                                 void *context, int vl,
1778                                                 int mode, u64 data)
1779 {
1780         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1781
1782         return dd->misc_err_status_cnt[6];
1783 }
1784
1785 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1786                                               void *context, int vl, int mode,
1787                                               u64 data)
1788 {
1789         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1790
1791         return dd->misc_err_status_cnt[5];
1792 }
1793
1794 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1795                                             void *context, int vl, int mode,
1796                                             u64 data)
1797 {
1798         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1799
1800         return dd->misc_err_status_cnt[4];
1801 }
1802
1803 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1804                                                  void *context, int vl,
1805                                                  int mode, u64 data)
1806 {
1807         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1808
1809         return dd->misc_err_status_cnt[3];
1810 }
1811
1812 static u64 access_misc_csr_write_bad_addr_err_cnt(
1813                                 const struct cntr_entry *entry,
1814                                 void *context, int vl, int mode, u64 data)
1815 {
1816         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1817
1818         return dd->misc_err_status_cnt[2];
1819 }
1820
1821 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1822                                                  void *context, int vl,
1823                                                  int mode, u64 data)
1824 {
1825         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1826
1827         return dd->misc_err_status_cnt[1];
1828 }
1829
1830 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1831                                           void *context, int vl, int mode,
1832                                           u64 data)
1833 {
1834         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1835
1836         return dd->misc_err_status_cnt[0];
1837 }
1838
1839 /*
1840  * Software counter for the aggregate of
1841  * individual CceErrStatus counters
1842  */
1843 static u64 access_sw_cce_err_status_aggregated_cnt(
1844                                 const struct cntr_entry *entry,
1845                                 void *context, int vl, int mode, u64 data)
1846 {
1847         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848
1849         return dd->sw_cce_err_status_aggregate;
1850 }
1851
1852 /*
1853  * Software counters corresponding to each of the
1854  * error status bits within CceErrStatus
1855  */
1856 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1857                                               void *context, int vl, int mode,
1858                                               u64 data)
1859 {
1860         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1861
1862         return dd->cce_err_status_cnt[40];
1863 }
1864
1865 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1866                                           void *context, int vl, int mode,
1867                                           u64 data)
1868 {
1869         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1870
1871         return dd->cce_err_status_cnt[39];
1872 }
1873
1874 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1875                                           void *context, int vl, int mode,
1876                                           u64 data)
1877 {
1878         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1879
1880         return dd->cce_err_status_cnt[38];
1881 }
1882
1883 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1884                                              void *context, int vl, int mode,
1885                                              u64 data)
1886 {
1887         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1888
1889         return dd->cce_err_status_cnt[37];
1890 }
1891
1892 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1893                                              void *context, int vl, int mode,
1894                                              u64 data)
1895 {
1896         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1897
1898         return dd->cce_err_status_cnt[36];
1899 }
1900
1901 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1902                                 const struct cntr_entry *entry,
1903                                 void *context, int vl, int mode, u64 data)
1904 {
1905         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1906
1907         return dd->cce_err_status_cnt[35];
1908 }
1909
1910 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1911                                 const struct cntr_entry *entry,
1912                                 void *context, int vl, int mode, u64 data)
1913 {
1914         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1915
1916         return dd->cce_err_status_cnt[34];
1917 }
1918
1919 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1920                                                  void *context, int vl,
1921                                                  int mode, u64 data)
1922 {
1923         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1924
1925         return dd->cce_err_status_cnt[33];
1926 }
1927
1928 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1929                                                 void *context, int vl, int mode,
1930                                                 u64 data)
1931 {
1932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934         return dd->cce_err_status_cnt[32];
1935 }
1936
1937 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1938                                    void *context, int vl, int mode, u64 data)
1939 {
1940         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1941
1942         return dd->cce_err_status_cnt[31];
1943 }
1944
1945 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1946                                                void *context, int vl, int mode,
1947                                                u64 data)
1948 {
1949         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1950
1951         return dd->cce_err_status_cnt[30];
1952 }
1953
1954 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1955                                               void *context, int vl, int mode,
1956                                               u64 data)
1957 {
1958         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1959
1960         return dd->cce_err_status_cnt[29];
1961 }
1962
1963 static u64 access_pcic_transmit_back_parity_err_cnt(
1964                                 const struct cntr_entry *entry,
1965                                 void *context, int vl, int mode, u64 data)
1966 {
1967         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1968
1969         return dd->cce_err_status_cnt[28];
1970 }
1971
1972 static u64 access_pcic_transmit_front_parity_err_cnt(
1973                                 const struct cntr_entry *entry,
1974                                 void *context, int vl, int mode, u64 data)
1975 {
1976         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1977
1978         return dd->cce_err_status_cnt[27];
1979 }
1980
1981 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1982                                              void *context, int vl, int mode,
1983                                              u64 data)
1984 {
1985         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1986
1987         return dd->cce_err_status_cnt[26];
1988 }
1989
1990 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1991                                             void *context, int vl, int mode,
1992                                             u64 data)
1993 {
1994         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1995
1996         return dd->cce_err_status_cnt[25];
1997 }
1998
1999 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
2000                                               void *context, int vl, int mode,
2001                                               u64 data)
2002 {
2003         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2004
2005         return dd->cce_err_status_cnt[24];
2006 }
2007
2008 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2009                                              void *context, int vl, int mode,
2010                                              u64 data)
2011 {
2012         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2013
2014         return dd->cce_err_status_cnt[23];
2015 }
2016
2017 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2018                                                  void *context, int vl,
2019                                                  int mode, u64 data)
2020 {
2021         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2022
2023         return dd->cce_err_status_cnt[22];
2024 }
2025
2026 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2027                                          void *context, int vl, int mode,
2028                                          u64 data)
2029 {
2030         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2031
2032         return dd->cce_err_status_cnt[21];
2033 }
2034
2035 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2036                                 const struct cntr_entry *entry,
2037                                 void *context, int vl, int mode, u64 data)
2038 {
2039         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2040
2041         return dd->cce_err_status_cnt[20];
2042 }
2043
2044 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2045                                                  void *context, int vl,
2046                                                  int mode, u64 data)
2047 {
2048         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2049
2050         return dd->cce_err_status_cnt[19];
2051 }
2052
2053 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2054                                              void *context, int vl, int mode,
2055                                              u64 data)
2056 {
2057         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2058
2059         return dd->cce_err_status_cnt[18];
2060 }
2061
2062 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2063                                             void *context, int vl, int mode,
2064                                             u64 data)
2065 {
2066         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2067
2068         return dd->cce_err_status_cnt[17];
2069 }
2070
2071 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2072                                               void *context, int vl, int mode,
2073                                               u64 data)
2074 {
2075         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2076
2077         return dd->cce_err_status_cnt[16];
2078 }
2079
2080 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2081                                              void *context, int vl, int mode,
2082                                              u64 data)
2083 {
2084         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2085
2086         return dd->cce_err_status_cnt[15];
2087 }
2088
2089 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2090                                                  void *context, int vl,
2091                                                  int mode, u64 data)
2092 {
2093         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2094
2095         return dd->cce_err_status_cnt[14];
2096 }
2097
2098 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2099                                              void *context, int vl, int mode,
2100                                              u64 data)
2101 {
2102         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2103
2104         return dd->cce_err_status_cnt[13];
2105 }
2106
2107 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2108                                 const struct cntr_entry *entry,
2109                                 void *context, int vl, int mode, u64 data)
2110 {
2111         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2112
2113         return dd->cce_err_status_cnt[12];
2114 }
2115
2116 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2117                                 const struct cntr_entry *entry,
2118                                 void *context, int vl, int mode, u64 data)
2119 {
2120         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2121
2122         return dd->cce_err_status_cnt[11];
2123 }
2124
2125 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2126                                 const struct cntr_entry *entry,
2127                                 void *context, int vl, int mode, u64 data)
2128 {
2129         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2130
2131         return dd->cce_err_status_cnt[10];
2132 }
2133
2134 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2135                                 const struct cntr_entry *entry,
2136                                 void *context, int vl, int mode, u64 data)
2137 {
2138         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2139
2140         return dd->cce_err_status_cnt[9];
2141 }
2142
2143 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2144                                 const struct cntr_entry *entry,
2145                                 void *context, int vl, int mode, u64 data)
2146 {
2147         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2148
2149         return dd->cce_err_status_cnt[8];
2150 }
2151
2152 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2153                                                  void *context, int vl,
2154                                                  int mode, u64 data)
2155 {
2156         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2157
2158         return dd->cce_err_status_cnt[7];
2159 }
2160
2161 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2162                                 const struct cntr_entry *entry,
2163                                 void *context, int vl, int mode, u64 data)
2164 {
2165         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2166
2167         return dd->cce_err_status_cnt[6];
2168 }
2169
2170 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2171                                                void *context, int vl, int mode,
2172                                                u64 data)
2173 {
2174         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2175
2176         return dd->cce_err_status_cnt[5];
2177 }
2178
2179 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2180                                           void *context, int vl, int mode,
2181                                           u64 data)
2182 {
2183         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2184
2185         return dd->cce_err_status_cnt[4];
2186 }
2187
2188 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2189                                 const struct cntr_entry *entry,
2190                                 void *context, int vl, int mode, u64 data)
2191 {
2192         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2193
2194         return dd->cce_err_status_cnt[3];
2195 }
2196
2197 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2198                                                  void *context, int vl,
2199                                                  int mode, u64 data)
2200 {
2201         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2202
2203         return dd->cce_err_status_cnt[2];
2204 }
2205
2206 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2207                                                 void *context, int vl,
2208                                                 int mode, u64 data)
2209 {
2210         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2211
2212         return dd->cce_err_status_cnt[1];
2213 }
2214
2215 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2216                                          void *context, int vl, int mode,
2217                                          u64 data)
2218 {
2219         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2220
2221         return dd->cce_err_status_cnt[0];
2222 }
2223
2224 /*
2225  * Software counters corresponding to each of the
2226  * error status bits within RcvErrStatus
2227  */
2228 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2229                                         void *context, int vl, int mode,
2230                                         u64 data)
2231 {
2232         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2233
2234         return dd->rcv_err_status_cnt[63];
2235 }
2236
2237 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2238                                                 void *context, int vl,
2239                                                 int mode, u64 data)
2240 {
2241         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2242
2243         return dd->rcv_err_status_cnt[62];
2244 }
2245
2246 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2247                                                void *context, int vl, int mode,
2248                                                u64 data)
2249 {
2250         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2251
2252         return dd->rcv_err_status_cnt[61];
2253 }
2254
2255 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2256                                          void *context, int vl, int mode,
2257                                          u64 data)
2258 {
2259         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2260
2261         return dd->rcv_err_status_cnt[60];
2262 }
2263
2264 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2265                                                  void *context, int vl,
2266                                                  int mode, u64 data)
2267 {
2268         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2269
2270         return dd->rcv_err_status_cnt[59];
2271 }
2272
2273 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2274                                                  void *context, int vl,
2275                                                  int mode, u64 data)
2276 {
2277         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2278
2279         return dd->rcv_err_status_cnt[58];
2280 }
2281
2282 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2283                                             void *context, int vl, int mode,
2284                                             u64 data)
2285 {
2286         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2287
2288         return dd->rcv_err_status_cnt[57];
2289 }
2290
2291 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2292                                            void *context, int vl, int mode,
2293                                            u64 data)
2294 {
2295         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2296
2297         return dd->rcv_err_status_cnt[56];
2298 }
2299
2300 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2301                                            void *context, int vl, int mode,
2302                                            u64 data)
2303 {
2304         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2305
2306         return dd->rcv_err_status_cnt[55];
2307 }
2308
2309 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2310                                 const struct cntr_entry *entry,
2311                                 void *context, int vl, int mode, u64 data)
2312 {
2313         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2314
2315         return dd->rcv_err_status_cnt[54];
2316 }
2317
2318 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2319                                 const struct cntr_entry *entry,
2320                                 void *context, int vl, int mode, u64 data)
2321 {
2322         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2323
2324         return dd->rcv_err_status_cnt[53];
2325 }
2326
2327 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2328                                                  void *context, int vl,
2329                                                  int mode, u64 data)
2330 {
2331         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2332
2333         return dd->rcv_err_status_cnt[52];
2334 }
2335
2336 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2337                                                  void *context, int vl,
2338                                                  int mode, u64 data)
2339 {
2340         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2341
2342         return dd->rcv_err_status_cnt[51];
2343 }
2344
2345 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2346                                                  void *context, int vl,
2347                                                  int mode, u64 data)
2348 {
2349         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2350
2351         return dd->rcv_err_status_cnt[50];
2352 }
2353
2354 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2355                                                  void *context, int vl,
2356                                                  int mode, u64 data)
2357 {
2358         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2359
2360         return dd->rcv_err_status_cnt[49];
2361 }
2362
2363 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2364                                                  void *context, int vl,
2365                                                  int mode, u64 data)
2366 {
2367         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2368
2369         return dd->rcv_err_status_cnt[48];
2370 }
2371
2372 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2373                                                  void *context, int vl,
2374                                                  int mode, u64 data)
2375 {
2376         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2377
2378         return dd->rcv_err_status_cnt[47];
2379 }
2380
2381 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2382                                          void *context, int vl, int mode,
2383                                          u64 data)
2384 {
2385         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2386
2387         return dd->rcv_err_status_cnt[46];
2388 }
2389
2390 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2391                                 const struct cntr_entry *entry,
2392                                 void *context, int vl, int mode, u64 data)
2393 {
2394         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2395
2396         return dd->rcv_err_status_cnt[45];
2397 }
2398
2399 static u64 access_rx_lookup_csr_parity_err_cnt(
2400                                 const struct cntr_entry *entry,
2401                                 void *context, int vl, int mode, u64 data)
2402 {
2403         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2404
2405         return dd->rcv_err_status_cnt[44];
2406 }
2407
2408 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2409                                 const struct cntr_entry *entry,
2410                                 void *context, int vl, int mode, u64 data)
2411 {
2412         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2413
2414         return dd->rcv_err_status_cnt[43];
2415 }
2416
2417 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2418                                 const struct cntr_entry *entry,
2419                                 void *context, int vl, int mode, u64 data)
2420 {
2421         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2422
2423         return dd->rcv_err_status_cnt[42];
2424 }
2425
2426 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2427                                 const struct cntr_entry *entry,
2428                                 void *context, int vl, int mode, u64 data)
2429 {
2430         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2431
2432         return dd->rcv_err_status_cnt[41];
2433 }
2434
2435 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2436                                 const struct cntr_entry *entry,
2437                                 void *context, int vl, int mode, u64 data)
2438 {
2439         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2440
2441         return dd->rcv_err_status_cnt[40];
2442 }
2443
2444 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2445                                 const struct cntr_entry *entry,
2446                                 void *context, int vl, int mode, u64 data)
2447 {
2448         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2449
2450         return dd->rcv_err_status_cnt[39];
2451 }
2452
2453 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2454                                 const struct cntr_entry *entry,
2455                                 void *context, int vl, int mode, u64 data)
2456 {
2457         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2458
2459         return dd->rcv_err_status_cnt[38];
2460 }
2461
2462 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2463                                 const struct cntr_entry *entry,
2464                                 void *context, int vl, int mode, u64 data)
2465 {
2466         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2467
2468         return dd->rcv_err_status_cnt[37];
2469 }
2470
2471 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2472                                 const struct cntr_entry *entry,
2473                                 void *context, int vl, int mode, u64 data)
2474 {
2475         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2476
2477         return dd->rcv_err_status_cnt[36];
2478 }
2479
2480 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2481                                 const struct cntr_entry *entry,
2482                                 void *context, int vl, int mode, u64 data)
2483 {
2484         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2485
2486         return dd->rcv_err_status_cnt[35];
2487 }
2488
2489 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2490                                 const struct cntr_entry *entry,
2491                                 void *context, int vl, int mode, u64 data)
2492 {
2493         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2494
2495         return dd->rcv_err_status_cnt[34];
2496 }
2497
2498 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2499                                 const struct cntr_entry *entry,
2500                                 void *context, int vl, int mode, u64 data)
2501 {
2502         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2503
2504         return dd->rcv_err_status_cnt[33];
2505 }
2506
2507 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2508                                         void *context, int vl, int mode,
2509                                         u64 data)
2510 {
2511         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2512
2513         return dd->rcv_err_status_cnt[32];
2514 }
2515
2516 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2517                                        void *context, int vl, int mode,
2518                                        u64 data)
2519 {
2520         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2521
2522         return dd->rcv_err_status_cnt[31];
2523 }
2524
2525 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2526                                           void *context, int vl, int mode,
2527                                           u64 data)
2528 {
2529         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2530
2531         return dd->rcv_err_status_cnt[30];
2532 }
2533
2534 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2535                                              void *context, int vl, int mode,
2536                                              u64 data)
2537 {
2538         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2539
2540         return dd->rcv_err_status_cnt[29];
2541 }
2542
2543 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2544                                                  void *context, int vl,
2545                                                  int mode, u64 data)
2546 {
2547         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2548
2549         return dd->rcv_err_status_cnt[28];
2550 }
2551
2552 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2553                                 const struct cntr_entry *entry,
2554                                 void *context, int vl, int mode, u64 data)
2555 {
2556         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2557
2558         return dd->rcv_err_status_cnt[27];
2559 }
2560
2561 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2562                                 const struct cntr_entry *entry,
2563                                 void *context, int vl, int mode, u64 data)
2564 {
2565         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2566
2567         return dd->rcv_err_status_cnt[26];
2568 }
2569
2570 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2571                                 const struct cntr_entry *entry,
2572                                 void *context, int vl, int mode, u64 data)
2573 {
2574         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2575
2576         return dd->rcv_err_status_cnt[25];
2577 }
2578
2579 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2580                                 const struct cntr_entry *entry,
2581                                 void *context, int vl, int mode, u64 data)
2582 {
2583         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2584
2585         return dd->rcv_err_status_cnt[24];
2586 }
2587
2588 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2589                                 const struct cntr_entry *entry,
2590                                 void *context, int vl, int mode, u64 data)
2591 {
2592         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2593
2594         return dd->rcv_err_status_cnt[23];
2595 }
2596
2597 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2598                                 const struct cntr_entry *entry,
2599                                 void *context, int vl, int mode, u64 data)
2600 {
2601         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2602
2603         return dd->rcv_err_status_cnt[22];
2604 }
2605
2606 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2607                                 const struct cntr_entry *entry,
2608                                 void *context, int vl, int mode, u64 data)
2609 {
2610         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2611
2612         return dd->rcv_err_status_cnt[21];
2613 }
2614
2615 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2616                                 const struct cntr_entry *entry,
2617                                 void *context, int vl, int mode, u64 data)
2618 {
2619         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2620
2621         return dd->rcv_err_status_cnt[20];
2622 }
2623
2624 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2625                                 const struct cntr_entry *entry,
2626                                 void *context, int vl, int mode, u64 data)
2627 {
2628         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2629
2630         return dd->rcv_err_status_cnt[19];
2631 }
2632
2633 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2634                                                  void *context, int vl,
2635                                                  int mode, u64 data)
2636 {
2637         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2638
2639         return dd->rcv_err_status_cnt[18];
2640 }
2641
2642 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2643                                                  void *context, int vl,
2644                                                  int mode, u64 data)
2645 {
2646         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2647
2648         return dd->rcv_err_status_cnt[17];
2649 }
2650
2651 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2652                                 const struct cntr_entry *entry,
2653                                 void *context, int vl, int mode, u64 data)
2654 {
2655         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2656
2657         return dd->rcv_err_status_cnt[16];
2658 }
2659
2660 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2661                                 const struct cntr_entry *entry,
2662                                 void *context, int vl, int mode, u64 data)
2663 {
2664         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2665
2666         return dd->rcv_err_status_cnt[15];
2667 }
2668
2669 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2670                                                 void *context, int vl,
2671                                                 int mode, u64 data)
2672 {
2673         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2674
2675         return dd->rcv_err_status_cnt[14];
2676 }
2677
2678 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2679                                                 void *context, int vl,
2680                                                 int mode, u64 data)
2681 {
2682         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2683
2684         return dd->rcv_err_status_cnt[13];
2685 }
2686
2687 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2688                                               void *context, int vl, int mode,
2689                                               u64 data)
2690 {
2691         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2692
2693         return dd->rcv_err_status_cnt[12];
2694 }
2695
2696 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2697                                           void *context, int vl, int mode,
2698                                           u64 data)
2699 {
2700         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2701
2702         return dd->rcv_err_status_cnt[11];
2703 }
2704
2705 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2706                                           void *context, int vl, int mode,
2707                                           u64 data)
2708 {
2709         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2710
2711         return dd->rcv_err_status_cnt[10];
2712 }
2713
2714 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2715                                                void *context, int vl, int mode,
2716                                                u64 data)
2717 {
2718         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2719
2720         return dd->rcv_err_status_cnt[9];
2721 }
2722
2723 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2724                                             void *context, int vl, int mode,
2725                                             u64 data)
2726 {
2727         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2728
2729         return dd->rcv_err_status_cnt[8];
2730 }
2731
2732 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2733                                 const struct cntr_entry *entry,
2734                                 void *context, int vl, int mode, u64 data)
2735 {
2736         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2737
2738         return dd->rcv_err_status_cnt[7];
2739 }
2740
2741 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2742                                 const struct cntr_entry *entry,
2743                                 void *context, int vl, int mode, u64 data)
2744 {
2745         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2746
2747         return dd->rcv_err_status_cnt[6];
2748 }
2749
2750 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2751                                           void *context, int vl, int mode,
2752                                           u64 data)
2753 {
2754         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2755
2756         return dd->rcv_err_status_cnt[5];
2757 }
2758
2759 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2760                                           void *context, int vl, int mode,
2761                                           u64 data)
2762 {
2763         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2764
2765         return dd->rcv_err_status_cnt[4];
2766 }
2767
2768 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2769                                          void *context, int vl, int mode,
2770                                          u64 data)
2771 {
2772         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2773
2774         return dd->rcv_err_status_cnt[3];
2775 }
2776
2777 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2778                                          void *context, int vl, int mode,
2779                                          u64 data)
2780 {
2781         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2782
2783         return dd->rcv_err_status_cnt[2];
2784 }
2785
2786 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2787                                             void *context, int vl, int mode,
2788                                             u64 data)
2789 {
2790         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2791
2792         return dd->rcv_err_status_cnt[1];
2793 }
2794
2795 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2796                                          void *context, int vl, int mode,
2797                                          u64 data)
2798 {
2799         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2800
2801         return dd->rcv_err_status_cnt[0];
2802 }
2803
2804 /*
2805  * Software counters corresponding to each of the
2806  * error status bits within SendPioErrStatus
2807  */
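/*
 * One accessor per SendPioErrStatus bit: each simply returns the software
 * shadow count dd->send_pio_err_status_cnt[bit], with bit running from 35
 * down to 0 (bits 31 and 30 are reserved).  The entry, vl, mode and data
 * arguments are unused for these fixed counters.
 */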
2808 static u64 access_pio_pec_sop_head_parity_err_cnt(
2809                                 const struct cntr_entry *entry,
2810                                 void *context, int vl, int mode, u64 data)
2811 {
2812         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2813
2814         return dd->send_pio_err_status_cnt[35];
2815 }
2816
2817 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2818                                 const struct cntr_entry *entry,
2819                                 void *context, int vl, int mode, u64 data)
2820 {
2821         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2822
2823         return dd->send_pio_err_status_cnt[34];
2824 }
2825
2826 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2827                                 const struct cntr_entry *entry,
2828                                 void *context, int vl, int mode, u64 data)
2829 {
2830         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2831
2832         return dd->send_pio_err_status_cnt[33];
2833 }
2834
2835 static u64 access_pio_current_free_cnt_parity_err_cnt(
2836                                 const struct cntr_entry *entry,
2837                                 void *context, int vl, int mode, u64 data)
2838 {
2839         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2840
2841         return dd->send_pio_err_status_cnt[32];
2842 }
2843
2844 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2845                                           void *context, int vl, int mode,
2846                                           u64 data)
2847 {
2848         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2849
2850         return dd->send_pio_err_status_cnt[31];
2851 }
2852
2853 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2854                                           void *context, int vl, int mode,
2855                                           u64 data)
2856 {
2857         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2858
2859         return dd->send_pio_err_status_cnt[30];
2860 }
2861
2862 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2863                                            void *context, int vl, int mode,
2864                                            u64 data)
2865 {
2866         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2867
2868         return dd->send_pio_err_status_cnt[29];
2869 }
2870
2871 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2872                                 const struct cntr_entry *entry,
2873                                 void *context, int vl, int mode, u64 data)
2874 {
2875         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2876
2877         return dd->send_pio_err_status_cnt[28];
2878 }
2879
2880 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2881                                              void *context, int vl, int mode,
2882                                              u64 data)
2883 {
2884         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2885
2886         return dd->send_pio_err_status_cnt[27];
2887 }
2888
2889 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2890                                              void *context, int vl, int mode,
2891                                              u64 data)
2892 {
2893         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2894
2895         return dd->send_pio_err_status_cnt[26];
2896 }
2897
2898 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2899                                                 void *context, int vl,
2900                                                 int mode, u64 data)
2901 {
2902         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2903
2904         return dd->send_pio_err_status_cnt[25];
2905 }
2906
2907 static u64 access_pio_block_qw_count_parity_err_cnt(
2908                                 const struct cntr_entry *entry,
2909                                 void *context, int vl, int mode, u64 data)
2910 {
2911         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2912
2913         return dd->send_pio_err_status_cnt[24];
2914 }
2915
2916 static u64 access_pio_write_qw_valid_parity_err_cnt(
2917                                 const struct cntr_entry *entry,
2918                                 void *context, int vl, int mode, u64 data)
2919 {
2920         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2921
2922         return dd->send_pio_err_status_cnt[23];
2923 }
2924
2925 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2926                                             void *context, int vl, int mode,
2927                                             u64 data)
2928 {
2929         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2930
2931         return dd->send_pio_err_status_cnt[22];
2932 }
2933
2934 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2935                                                 void *context, int vl,
2936                                                 int mode, u64 data)
2937 {
2938         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2939
2940         return dd->send_pio_err_status_cnt[21];
2941 }
2942
2943 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2944                                                 void *context, int vl,
2945                                                 int mode, u64 data)
2946 {
2947         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2948
2949         return dd->send_pio_err_status_cnt[20];
2950 }
2951
2952 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2953                                                 void *context, int vl,
2954                                                 int mode, u64 data)
2955 {
2956         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2957
2958         return dd->send_pio_err_status_cnt[19];
2959 }
2960
2961 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2962                                 const struct cntr_entry *entry,
2963                                 void *context, int vl, int mode, u64 data)
2964 {
2965         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2966
2967         return dd->send_pio_err_status_cnt[18];
2968 }
2969
2970 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2971                                          void *context, int vl, int mode,
2972                                          u64 data)
2973 {
2974         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2975
2976         return dd->send_pio_err_status_cnt[17];
2977 }
2978
2979 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2980                                             void *context, int vl, int mode,
2981                                             u64 data)
2982 {
2983         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2984
2985         return dd->send_pio_err_status_cnt[16];
2986 }
2987
2988 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2989                                 const struct cntr_entry *entry,
2990                                 void *context, int vl, int mode, u64 data)
2991 {
2992         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2993
2994         return dd->send_pio_err_status_cnt[15];
2995 }
2996
2997 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2998                                 const struct cntr_entry *entry,
2999                                 void *context, int vl, int mode, u64 data)
3000 {
3001         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3002
3003         return dd->send_pio_err_status_cnt[14];
3004 }
3005
3006 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
3007                                 const struct cntr_entry *entry,
3008                                 void *context, int vl, int mode, u64 data)
3009 {
3010         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3011
3012         return dd->send_pio_err_status_cnt[13];
3013 }
3014
3015 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3016                                 const struct cntr_entry *entry,
3017                                 void *context, int vl, int mode, u64 data)
3018 {
3019         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3020
3021         return dd->send_pio_err_status_cnt[12];
3022 }
3023
3024 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3025                                 const struct cntr_entry *entry,
3026                                 void *context, int vl, int mode, u64 data)
3027 {
3028         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3029
3030         return dd->send_pio_err_status_cnt[11];
3031 }
3032
3033 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3034                                 const struct cntr_entry *entry,
3035                                 void *context, int vl, int mode, u64 data)
3036 {
3037         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3038
3039         return dd->send_pio_err_status_cnt[10];
3040 }
3041
3042 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3043                                 const struct cntr_entry *entry,
3044                                 void *context, int vl, int mode, u64 data)
3045 {
3046         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3047
3048         return dd->send_pio_err_status_cnt[9];
3049 }
3050
3051 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3052                                 const struct cntr_entry *entry,
3053                                 void *context, int vl, int mode, u64 data)
3054 {
3055         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3056
3057         return dd->send_pio_err_status_cnt[8];
3058 }
3059
3060 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3061                                 const struct cntr_entry *entry,
3062                                 void *context, int vl, int mode, u64 data)
3063 {
3064         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3065
3066         return dd->send_pio_err_status_cnt[7];
3067 }
3068
3069 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3070                                               void *context, int vl, int mode,
3071                                               u64 data)
3072 {
3073         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3074
3075         return dd->send_pio_err_status_cnt[6];
3076 }
3077
3078 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3079                                               void *context, int vl, int mode,
3080                                               u64 data)
3081 {
3082         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3083
3084         return dd->send_pio_err_status_cnt[5];
3085 }
3086
3087 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3088                                            void *context, int vl, int mode,
3089                                            u64 data)
3090 {
3091         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3092
3093         return dd->send_pio_err_status_cnt[4];
3094 }
3095
3096 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3097                                            void *context, int vl, int mode,
3098                                            u64 data)
3099 {
3100         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3101
3102         return dd->send_pio_err_status_cnt[3];
3103 }
3104
3105 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3106                                          void *context, int vl, int mode,
3107                                          u64 data)
3108 {
3109         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3110
3111         return dd->send_pio_err_status_cnt[2];
3112 }
3113
3114 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3115                                                 void *context, int vl,
3116                                                 int mode, u64 data)
3117 {
3118         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3119
3120         return dd->send_pio_err_status_cnt[1];
3121 }
3122
3123 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3124                                              void *context, int vl, int mode,
3125                                              u64 data)
3126 {
3127         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3128
3129         return dd->send_pio_err_status_cnt[0];
3130 }
3131
3132 /*
3133  * Software counters corresponding to each of the
3134  * error status bits within SendDmaErrStatus
3135  */
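/*
 * Only indices 3..0 of dd->send_dma_err_status_cnt[] are exposed here,
 * one per SendDmaErrStatus error bit.
 */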
3136 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3137                                 const struct cntr_entry *entry,
3138                                 void *context, int vl, int mode, u64 data)
3139 {
3140         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3141
3142         return dd->send_dma_err_status_cnt[3];
3143 }
3144
3145 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3146                                 const struct cntr_entry *entry,
3147                                 void *context, int vl, int mode, u64 data)
3148 {
3149         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3150
3151         return dd->send_dma_err_status_cnt[2];
3152 }
3153
3154 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3155                                           void *context, int vl, int mode,
3156                                           u64 data)
3157 {
3158         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3159
3160         return dd->send_dma_err_status_cnt[1];
3161 }
3162
3163 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3164                                        void *context, int vl, int mode,
3165                                        u64 data)
3166 {
3167         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3168
3169         return dd->send_dma_err_status_cnt[0];
3170 }
3171
3172 /*
3173  * Software counters corresponding to each of the
3174  * error status bits within SendEgressErrStatus
3175  */
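/*
 * The SendEgressErrStatus accessors follow the same pattern, covering
 * error bits 63 down to 0 of dd->send_egress_err_status_cnt[], including
 * the reserved bits 10, 9, 6 and 2.
 */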
3176 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3177                                 const struct cntr_entry *entry,
3178                                 void *context, int vl, int mode, u64 data)
3179 {
3180         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3181
3182         return dd->send_egress_err_status_cnt[63];
3183 }
3184
3185 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3186                                 const struct cntr_entry *entry,
3187                                 void *context, int vl, int mode, u64 data)
3188 {
3189         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3190
3191         return dd->send_egress_err_status_cnt[62];
3192 }
3193
3194 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3195                                              void *context, int vl, int mode,
3196                                              u64 data)
3197 {
3198         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3199
3200         return dd->send_egress_err_status_cnt[61];
3201 }
3202
3203 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3204                                                  void *context, int vl,
3205                                                  int mode, u64 data)
3206 {
3207         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3208
3209         return dd->send_egress_err_status_cnt[60];
3210 }
3211
3212 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3213                                 const struct cntr_entry *entry,
3214                                 void *context, int vl, int mode, u64 data)
3215 {
3216         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3217
3218         return dd->send_egress_err_status_cnt[59];
3219 }
3220
3221 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3222                                         void *context, int vl, int mode,
3223                                         u64 data)
3224 {
3225         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3226
3227         return dd->send_egress_err_status_cnt[58];
3228 }
3229
3230 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3231                                             void *context, int vl, int mode,
3232                                             u64 data)
3233 {
3234         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3235
3236         return dd->send_egress_err_status_cnt[57];
3237 }
3238
3239 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3240                                               void *context, int vl, int mode,
3241                                               u64 data)
3242 {
3243         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3244
3245         return dd->send_egress_err_status_cnt[56];
3246 }
3247
3248 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3249                                               void *context, int vl, int mode,
3250                                               u64 data)
3251 {
3252         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3253
3254         return dd->send_egress_err_status_cnt[55];
3255 }
3256
3257 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3258                                               void *context, int vl, int mode,
3259                                               u64 data)
3260 {
3261         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3262
3263         return dd->send_egress_err_status_cnt[54];
3264 }
3265
3266 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3267                                               void *context, int vl, int mode,
3268                                               u64 data)
3269 {
3270         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3271
3272         return dd->send_egress_err_status_cnt[53];
3273 }
3274
3275 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3276                                               void *context, int vl, int mode,
3277                                               u64 data)
3278 {
3279         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3280
3281         return dd->send_egress_err_status_cnt[52];
3282 }
3283
3284 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3285                                               void *context, int vl, int mode,
3286                                               u64 data)
3287 {
3288         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3289
3290         return dd->send_egress_err_status_cnt[51];
3291 }
3292
3293 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3294                                               void *context, int vl, int mode,
3295                                               u64 data)
3296 {
3297         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3298
3299         return dd->send_egress_err_status_cnt[50];
3300 }
3301
3302 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3303                                               void *context, int vl, int mode,
3304                                               u64 data)
3305 {
3306         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3307
3308         return dd->send_egress_err_status_cnt[49];
3309 }
3310
3311 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3312                                               void *context, int vl, int mode,
3313                                               u64 data)
3314 {
3315         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3316
3317         return dd->send_egress_err_status_cnt[48];
3318 }
3319
3320 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3321                                               void *context, int vl, int mode,
3322                                               u64 data)
3323 {
3324         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3325
3326         return dd->send_egress_err_status_cnt[47];
3327 }
3328
3329 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3330                                             void *context, int vl, int mode,
3331                                             u64 data)
3332 {
3333         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3334
3335         return dd->send_egress_err_status_cnt[46];
3336 }
3337
3338 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3339                                              void *context, int vl, int mode,
3340                                              u64 data)
3341 {
3342         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3343
3344         return dd->send_egress_err_status_cnt[45];
3345 }
3346
3347 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3348                                                  void *context, int vl,
3349                                                  int mode, u64 data)
3350 {
3351         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3352
3353         return dd->send_egress_err_status_cnt[44];
3354 }
3355
3356 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3357                                 const struct cntr_entry *entry,
3358                                 void *context, int vl, int mode, u64 data)
3359 {
3360         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3361
3362         return dd->send_egress_err_status_cnt[43];
3363 }
3364
3365 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3366                                         void *context, int vl, int mode,
3367                                         u64 data)
3368 {
3369         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3370
3371         return dd->send_egress_err_status_cnt[42];
3372 }
3373
3374 static u64 access_tx_credit_return_partiy_err_cnt(
3375                                 const struct cntr_entry *entry,
3376                                 void *context, int vl, int mode, u64 data)
3377 {
3378         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3379
3380         return dd->send_egress_err_status_cnt[41];
3381 }
3382
3383 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3384                                 const struct cntr_entry *entry,
3385                                 void *context, int vl, int mode, u64 data)
3386 {
3387         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3388
3389         return dd->send_egress_err_status_cnt[40];
3390 }
3391
3392 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3393                                 const struct cntr_entry *entry,
3394                                 void *context, int vl, int mode, u64 data)
3395 {
3396         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3397
3398         return dd->send_egress_err_status_cnt[39];
3399 }
3400
3401 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3402                                 const struct cntr_entry *entry,
3403                                 void *context, int vl, int mode, u64 data)
3404 {
3405         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3406
3407         return dd->send_egress_err_status_cnt[38];
3408 }
3409
3410 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3411                                 const struct cntr_entry *entry,
3412                                 void *context, int vl, int mode, u64 data)
3413 {
3414         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3415
3416         return dd->send_egress_err_status_cnt[37];
3417 }
3418
3419 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3420                                 const struct cntr_entry *entry,
3421                                 void *context, int vl, int mode, u64 data)
3422 {
3423         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3424
3425         return dd->send_egress_err_status_cnt[36];
3426 }
3427
3428 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3429                                 const struct cntr_entry *entry,
3430                                 void *context, int vl, int mode, u64 data)
3431 {
3432         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3433
3434         return dd->send_egress_err_status_cnt[35];
3435 }
3436
3437 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3438                                 const struct cntr_entry *entry,
3439                                 void *context, int vl, int mode, u64 data)
3440 {
3441         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3442
3443         return dd->send_egress_err_status_cnt[34];
3444 }
3445
3446 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3447                                 const struct cntr_entry *entry,
3448                                 void *context, int vl, int mode, u64 data)
3449 {
3450         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3451
3452         return dd->send_egress_err_status_cnt[33];
3453 }
3454
3455 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3456                                 const struct cntr_entry *entry,
3457                                 void *context, int vl, int mode, u64 data)
3458 {
3459         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3460
3461         return dd->send_egress_err_status_cnt[32];
3462 }
3463
3464 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3465                                 const struct cntr_entry *entry,
3466                                 void *context, int vl, int mode, u64 data)
3467 {
3468         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3469
3470         return dd->send_egress_err_status_cnt[31];
3471 }
3472
3473 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3474                                 const struct cntr_entry *entry,
3475                                 void *context, int vl, int mode, u64 data)
3476 {
3477         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3478
3479         return dd->send_egress_err_status_cnt[30];
3480 }
3481
3482 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3483                                 const struct cntr_entry *entry,
3484                                 void *context, int vl, int mode, u64 data)
3485 {
3486         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3487
3488         return dd->send_egress_err_status_cnt[29];
3489 }
3490
3491 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3492                                 const struct cntr_entry *entry,
3493                                 void *context, int vl, int mode, u64 data)
3494 {
3495         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3496
3497         return dd->send_egress_err_status_cnt[28];
3498 }
3499
3500 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3501                                 const struct cntr_entry *entry,
3502                                 void *context, int vl, int mode, u64 data)
3503 {
3504         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3505
3506         return dd->send_egress_err_status_cnt[27];
3507 }
3508
3509 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3510                                 const struct cntr_entry *entry,
3511                                 void *context, int vl, int mode, u64 data)
3512 {
3513         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3514
3515         return dd->send_egress_err_status_cnt[26];
3516 }
3517
3518 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3519                                 const struct cntr_entry *entry,
3520                                 void *context, int vl, int mode, u64 data)
3521 {
3522         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3523
3524         return dd->send_egress_err_status_cnt[25];
3525 }
3526
3527 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3528                                 const struct cntr_entry *entry,
3529                                 void *context, int vl, int mode, u64 data)
3530 {
3531         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3532
3533         return dd->send_egress_err_status_cnt[24];
3534 }
3535
3536 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3537                                 const struct cntr_entry *entry,
3538                                 void *context, int vl, int mode, u64 data)
3539 {
3540         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3541
3542         return dd->send_egress_err_status_cnt[23];
3543 }
3544
3545 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3546                                 const struct cntr_entry *entry,
3547                                 void *context, int vl, int mode, u64 data)
3548 {
3549         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3550
3551         return dd->send_egress_err_status_cnt[22];
3552 }
3553
3554 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3555                                 const struct cntr_entry *entry,
3556                                 void *context, int vl, int mode, u64 data)
3557 {
3558         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3559
3560         return dd->send_egress_err_status_cnt[21];
3561 }
3562
3563 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3564                                 const struct cntr_entry *entry,
3565                                 void *context, int vl, int mode, u64 data)
3566 {
3567         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3568
3569         return dd->send_egress_err_status_cnt[20];
3570 }
3571
3572 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3573                                 const struct cntr_entry *entry,
3574                                 void *context, int vl, int mode, u64 data)
3575 {
3576         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3577
3578         return dd->send_egress_err_status_cnt[19];
3579 }
3580
3581 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3582                                 const struct cntr_entry *entry,
3583                                 void *context, int vl, int mode, u64 data)
3584 {
3585         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3586
3587         return dd->send_egress_err_status_cnt[18];
3588 }
3589
3590 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3591                                 const struct cntr_entry *entry,
3592                                 void *context, int vl, int mode, u64 data)
3593 {
3594         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3595
3596         return dd->send_egress_err_status_cnt[17];
3597 }
3598
3599 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3600                                 const struct cntr_entry *entry,
3601                                 void *context, int vl, int mode, u64 data)
3602 {
3603         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3604
3605         return dd->send_egress_err_status_cnt[16];
3606 }
3607
3608 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3609                                            void *context, int vl, int mode,
3610                                            u64 data)
3611 {
3612         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3613
3614         return dd->send_egress_err_status_cnt[15];
3615 }
3616
3617 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3618                                                  void *context, int vl,
3619                                                  int mode, u64 data)
3620 {
3621         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3622
3623         return dd->send_egress_err_status_cnt[14];
3624 }
3625
3626 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3627                                                void *context, int vl, int mode,
3628                                                u64 data)
3629 {
3630         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3631
3632         return dd->send_egress_err_status_cnt[13];
3633 }
3634
3635 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3636                                         void *context, int vl, int mode,
3637                                         u64 data)
3638 {
3639         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3640
3641         return dd->send_egress_err_status_cnt[12];
3642 }
3643
3644 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3645                                 const struct cntr_entry *entry,
3646                                 void *context, int vl, int mode, u64 data)
3647 {
3648         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3649
3650         return dd->send_egress_err_status_cnt[11];
3651 }
3652
3653 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3654                                              void *context, int vl, int mode,
3655                                              u64 data)
3656 {
3657         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3658
3659         return dd->send_egress_err_status_cnt[10];
3660 }
3661
3662 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3663                                             void *context, int vl, int mode,
3664                                             u64 data)
3665 {
3666         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3667
3668         return dd->send_egress_err_status_cnt[9];
3669 }
3670
3671 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3672                                 const struct cntr_entry *entry,
3673                                 void *context, int vl, int mode, u64 data)
3674 {
3675         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3676
3677         return dd->send_egress_err_status_cnt[8];
3678 }
3679
3680 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3681                                 const struct cntr_entry *entry,
3682                                 void *context, int vl, int mode, u64 data)
3683 {
3684         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3685
3686         return dd->send_egress_err_status_cnt[7];
3687 }
3688
3689 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3690                                             void *context, int vl, int mode,
3691                                             u64 data)
3692 {
3693         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3694
3695         return dd->send_egress_err_status_cnt[6];
3696 }
3697
3698 static u64 access_tx_incorrect_link_state_err_cnt(
3699                                 const struct cntr_entry *entry,
3700                                 void *context, int vl, int mode, u64 data)
3701 {
3702         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3703
3704         return dd->send_egress_err_status_cnt[5];
3705 }
3706
3707 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3708                                       void *context, int vl, int mode,
3709                                       u64 data)
3710 {
3711         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3712
3713         return dd->send_egress_err_status_cnt[4];
3714 }
3715
3716 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3717                                 const struct cntr_entry *entry,
3718                                 void *context, int vl, int mode, u64 data)
3719 {
3720         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3721
3722         return dd->send_egress_err_status_cnt[3];
3723 }
3724
3725 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3726                                             void *context, int vl, int mode,
3727                                             u64 data)
3728 {
3729         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3730
3731         return dd->send_egress_err_status_cnt[2];
3732 }
3733
3734 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3735                                 const struct cntr_entry *entry,
3736                                 void *context, int vl, int mode, u64 data)
3737 {
3738         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3739
3740         return dd->send_egress_err_status_cnt[1];
3741 }
3742
3743 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3744                                 const struct cntr_entry *entry,
3745                                 void *context, int vl, int mode, u64 data)
3746 {
3747         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748
3749         return dd->send_egress_err_status_cnt[0];
3750 }
3751
3752 /*
3753  * Software counters corresponding to each of the
3754  * error status bits within SendErrStatus
3755  */
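/*
 * SendErrStatus exposes three counters: indices 2..0 of
 * dd->send_err_status_cnt[].
 */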
3756 static u64 access_send_csr_write_bad_addr_err_cnt(
3757                                 const struct cntr_entry *entry,
3758                                 void *context, int vl, int mode, u64 data)
3759 {
3760         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3761
3762         return dd->send_err_status_cnt[2];
3763 }
3764
3765 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3766                                                  void *context, int vl,
3767                                                  int mode, u64 data)
3768 {
3769         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3770
3771         return dd->send_err_status_cnt[1];
3772 }
3773
3774 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3775                                       void *context, int vl, int mode,
3776                                       u64 data)
3777 {
3778         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3779
3780         return dd->send_err_status_cnt[0];
3781 }
3782
3783 /*
3784  * Software counters corresponding to each of the
3785  * error status bits within SendCtxtErrStatus
3786  */
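/*
 * Per-bit counters for SendCtxtErrStatus: indices 4..0 of
 * dd->sw_ctxt_err_status_cnt[].
 */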
3787 static u64 access_pio_write_out_of_bounds_err_cnt(
3788                                 const struct cntr_entry *entry,
3789                                 void *context, int vl, int mode, u64 data)
3790 {
3791         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3792
3793         return dd->sw_ctxt_err_status_cnt[4];
3794 }
3795
3796 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3797                                              void *context, int vl, int mode,
3798                                              u64 data)
3799 {
3800         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3801
3802         return dd->sw_ctxt_err_status_cnt[3];
3803 }
3804
3805 static u64 access_pio_write_crosses_boundary_err_cnt(
3806                                 const struct cntr_entry *entry,
3807                                 void *context, int vl, int mode, u64 data)
3808 {
3809         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3810
3811         return dd->sw_ctxt_err_status_cnt[2];
3812 }
3813
3814 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3815                                                 void *context, int vl,
3816                                                 int mode, u64 data)
3817 {
3818         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3819
3820         return dd->sw_ctxt_err_status_cnt[1];
3821 }
3822
3823 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3824                                                void *context, int vl, int mode,
3825                                                u64 data)
3826 {
3827         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3828
3829         return dd->sw_ctxt_err_status_cnt[0];
3830 }
3831
3832 /*
3833  * Software counters corresponding to each of the
3834  * error status bits within SendDmaEngErrStatus
3835  */
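/*
 * Per-bit counters for SendDmaEngErrStatus: indices 23..0 of
 * dd->sw_send_dma_eng_err_status_cnt[] (bit 9 is reserved).
 */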
3836 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3837                                 const struct cntr_entry *entry,
3838                                 void *context, int vl, int mode, u64 data)
3839 {
3840         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3841
3842         return dd->sw_send_dma_eng_err_status_cnt[23];
3843 }
3844
3845 static u64 access_sdma_header_storage_cor_err_cnt(
3846                                 const struct cntr_entry *entry,
3847                                 void *context, int vl, int mode, u64 data)
3848 {
3849         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3850
3851         return dd->sw_send_dma_eng_err_status_cnt[22];
3852 }
3853
3854 static u64 access_sdma_packet_tracking_cor_err_cnt(
3855                                 const struct cntr_entry *entry,
3856                                 void *context, int vl, int mode, u64 data)
3857 {
3858         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3859
3860         return dd->sw_send_dma_eng_err_status_cnt[21];
3861 }
3862
3863 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3864                                             void *context, int vl, int mode,
3865                                             u64 data)
3866 {
3867         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3868
3869         return dd->sw_send_dma_eng_err_status_cnt[20];
3870 }
3871
3872 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3873                                               void *context, int vl, int mode,
3874                                               u64 data)
3875 {
3876         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3877
3878         return dd->sw_send_dma_eng_err_status_cnt[19];
3879 }
3880
3881 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3882                                 const struct cntr_entry *entry,
3883                                 void *context, int vl, int mode, u64 data)
3884 {
3885         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3886
3887         return dd->sw_send_dma_eng_err_status_cnt[18];
3888 }
3889
3890 static u64 access_sdma_header_storage_unc_err_cnt(
3891                                 const struct cntr_entry *entry,
3892                                 void *context, int vl, int mode, u64 data)
3893 {
3894         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3895
3896         return dd->sw_send_dma_eng_err_status_cnt[17];
3897 }
3898
3899 static u64 access_sdma_packet_tracking_unc_err_cnt(
3900                                 const struct cntr_entry *entry,
3901                                 void *context, int vl, int mode, u64 data)
3902 {
3903         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3904
3905         return dd->sw_send_dma_eng_err_status_cnt[16];
3906 }
3907
3908 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3909                                             void *context, int vl, int mode,
3910                                             u64 data)
3911 {
3912         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3913
3914         return dd->sw_send_dma_eng_err_status_cnt[15];
3915 }
3916
3917 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3918                                               void *context, int vl, int mode,
3919                                               u64 data)
3920 {
3921         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3922
3923         return dd->sw_send_dma_eng_err_status_cnt[14];
3924 }
3925
3926 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3927                                        void *context, int vl, int mode,
3928                                        u64 data)
3929 {
3930         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3931
3932         return dd->sw_send_dma_eng_err_status_cnt[13];
3933 }
3934
3935 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3936                                              void *context, int vl, int mode,
3937                                              u64 data)
3938 {
3939         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3940
3941         return dd->sw_send_dma_eng_err_status_cnt[12];
3942 }
3943
3944 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3945                                               void *context, int vl, int mode,
3946                                               u64 data)
3947 {
3948         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3949
3950         return dd->sw_send_dma_eng_err_status_cnt[11];
3951 }
3952
3953 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3954                                              void *context, int vl, int mode,
3955                                              u64 data)
3956 {
3957         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3958
3959         return dd->sw_send_dma_eng_err_status_cnt[10];
3960 }
3961
3962 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3963                                           void *context, int vl, int mode,
3964                                           u64 data)
3965 {
3966         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3967
3968         return dd->sw_send_dma_eng_err_status_cnt[9];
3969 }
3970
3971 static u64 access_sdma_packet_desc_overflow_err_cnt(
3972                                 const struct cntr_entry *entry,
3973                                 void *context, int vl, int mode, u64 data)
3974 {
3975         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3976
3977         return dd->sw_send_dma_eng_err_status_cnt[8];
3978 }
3979
3980 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3981                                                void *context, int vl,
3982                                                int mode, u64 data)
3983 {
3984         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3985
3986         return dd->sw_send_dma_eng_err_status_cnt[7];
3987 }
3988
3989 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3990                                     void *context, int vl, int mode, u64 data)
3991 {
3992         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3993
3994         return dd->sw_send_dma_eng_err_status_cnt[6];
3995 }
3996
3997 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3998                                         void *context, int vl, int mode,
3999                                         u64 data)
4000 {
4001         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4002
4003         return dd->sw_send_dma_eng_err_status_cnt[5];
4004 }
4005
4006 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
4007                                           void *context, int vl, int mode,
4008                                           u64 data)
4009 {
4010         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4011
4012         return dd->sw_send_dma_eng_err_status_cnt[4];
4013 }
4014
4015 static u64 access_sdma_tail_out_of_bounds_err_cnt(
4016                                 const struct cntr_entry *entry,
4017                                 void *context, int vl, int mode, u64 data)
4018 {
4019         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4020
4021         return dd->sw_send_dma_eng_err_status_cnt[3];
4022 }
4023
4024 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4025                                         void *context, int vl, int mode,
4026                                         u64 data)
4027 {
4028         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4029
4030         return dd->sw_send_dma_eng_err_status_cnt[2];
4031 }
4032
4033 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4034                                             void *context, int vl, int mode,
4035                                             u64 data)
4036 {
4037         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4038
4039         return dd->sw_send_dma_eng_err_status_cnt[1];
4040 }
4041
4042 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4043                                         void *context, int vl, int mode,
4044                                         u64 data)
4045 {
4046         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4047
4048         return dd->sw_send_dma_eng_err_status_cnt[0];
4049 }
4050
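/*
 * DcRecvErr: on a read, fold the software-counted bypass packet errors
 * into the hardware DCC_ERR_PORTRCV_ERR_CNT value, saturating at
 * CNTR_MAX; a write clears the software count instead.
 */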
4051 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4052                                  void *context, int vl, int mode,
4053                                  u64 data)
4054 {
4055         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4056
4057         u64 val = 0;
4058         u64 csr = entry->csr;
4059
4060         val = read_write_csr(dd, csr, mode, data);
4061         if (mode == CNTR_MODE_R) {
4062                 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4063                         CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4064         } else if (mode == CNTR_MODE_W) {
4065                 dd->sw_rcv_bypass_packet_errors = 0;
4066         } else {
4067                 dd_dev_err(dd, "Invalid cntr register access mode");
4068                 return 0;
4069         }
4070         return val;
4071 }
4072
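/*
 * def_access_sw_cpu() generates an accessor that passes a per-CPU verbs
 * counter (e.g. rvp.rc_acks) and its matching rvp.z_ field to
 * read_write_cpu() for the port supplied via @context.
 */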
4073 #define def_access_sw_cpu(cntr) \
4074 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
4075                               void *context, int vl, int mode, u64 data)      \
4076 {                                                                             \
4077         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4078         return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
4079                               ppd->ibport_data.rvp.cntr, vl,                  \
4080                               mode, data);                                    \
4081 }
4082
4083 def_access_sw_cpu(rc_acks);
4084 def_access_sw_cpu(rc_qacks);
4085 def_access_sw_cpu(rc_delayed_comp);
4086
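/*
 * def_access_ibp_counter() generates an accessor for a plain software
 * ibport counter (rvp.n_<cntr>).  These counters are not kept per VL,
 * so any per-VL query returns 0; otherwise the value is read or written
 * through read_write_sw().
 */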
4087 #define def_access_ibp_counter(cntr) \
4088 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
4089                                 void *context, int vl, int mode, u64 data)    \
4090 {                                                                             \
4091         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4092                                                                               \
4093         if (vl != CNTR_INVALID_VL)                                            \
4094                 return 0;                                                     \
4095                                                                               \
4096         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
4097                              mode, data);                                     \
4098 }
4099
4100 def_access_ibp_counter(loop_pkts);
4101 def_access_ibp_counter(rc_resends);
4102 def_access_ibp_counter(rnr_naks);
4103 def_access_ibp_counter(other_naks);
4104 def_access_ibp_counter(rc_timeouts);
4105 def_access_ibp_counter(pkt_drops);
4106 def_access_ibp_counter(dmawait);
4107 def_access_ibp_counter(rc_seqnak);
4108 def_access_ibp_counter(rc_dupreq);
4109 def_access_ibp_counter(rdma_seq);
4110 def_access_ibp_counter(unaligned);
4111 def_access_ibp_counter(seq_naks);
4112 def_access_ibp_counter(rc_crwaits);
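/*
 * For illustration, def_access_ibp_counter(rc_resends) above expands to
 * roughly:
 *
 *	static u64 access_ibp_rc_resends(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd,
 *				     &ppd->ibport_data.rvp.n_rc_resends,
 *				     mode, data);
 *	}
 */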
4113
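/*
 * dev_cntrs[] describes every device-wide counter, indexed by the C_*
 * enum values.  Entries built with the RXE32/CCE/DC_PERF* macros read a
 * hardware CSR; entries with a zero CSR and an explicit access handler
 * report values maintained in software.  CNTR_VL marks counters kept
 * per virtual lane, and CNTR_SYNTH marks counters that are periodically
 * sampled and accumulated into 64-bit totals by the counter code.
 */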
4114 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4115 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4116 [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
4117 [C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
4118 [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
4119 [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
4120 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4121                         CNTR_NORMAL),
4122 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4123                         CNTR_NORMAL),
4124 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4125                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
4126                         CNTR_NORMAL),
4127 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4128                         CNTR_NORMAL),
4129 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4130                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4131 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4132                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4133 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4134                         CNTR_NORMAL),
4135 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4136                         CNTR_NORMAL),
4137 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4138                         CNTR_NORMAL),
4139 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4140                         CNTR_NORMAL),
4141 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4142                         CNTR_NORMAL),
4143 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4144                         CNTR_NORMAL),
4145 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4146                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4147 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4148                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4149 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4150                               CNTR_SYNTH),
4151 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4152                             access_dc_rcv_err_cnt),
4153 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4154                                  CNTR_SYNTH),
4155 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4156                                   CNTR_SYNTH),
4157 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4158                                   CNTR_SYNTH),
4159 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4160                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4161 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4162                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4163                                   CNTR_SYNTH),
4164 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4165                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4166 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4167                                CNTR_SYNTH),
4168 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4169                               CNTR_SYNTH),
4170 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4171                                CNTR_SYNTH),
4172 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4173                                  CNTR_SYNTH),
4174 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4175                                 CNTR_SYNTH),
4176 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4177                                 CNTR_SYNTH),
4178 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4179                                CNTR_SYNTH),
4180 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4181                                  CNTR_SYNTH | CNTR_VL),
4182 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4183                                 CNTR_SYNTH | CNTR_VL),
4184 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4185 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4186                                  CNTR_SYNTH | CNTR_VL),
4187 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4188 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4189                                  CNTR_SYNTH | CNTR_VL),
4190 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4191                               CNTR_SYNTH),
4192 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4193                                  CNTR_SYNTH | CNTR_VL),
4194 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4195                                 CNTR_SYNTH),
4196 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4197                                    CNTR_SYNTH | CNTR_VL),
4198 [C_DC_TOTAL_CRC] =
4199         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4200                          CNTR_SYNTH),
4201 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4202                                   CNTR_SYNTH),
4203 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4204                                   CNTR_SYNTH),
4205 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4206                                   CNTR_SYNTH),
4207 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4208                                   CNTR_SYNTH),
4209 [C_DC_CRC_MULT_LN] =
4210         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4211                          CNTR_SYNTH),
4212 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4213                                     CNTR_SYNTH),
4214 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4215                                     CNTR_SYNTH),
4216 [C_DC_SEQ_CRC_CNT] =
4217         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4218                          CNTR_SYNTH),
4219 [C_DC_ESC0_ONLY_CNT] =
4220         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4221                          CNTR_SYNTH),
4222 [C_DC_ESC0_PLUS1_CNT] =
4223         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4224                          CNTR_SYNTH),
4225 [C_DC_ESC0_PLUS2_CNT] =
4226         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4227                          CNTR_SYNTH),
4228 [C_DC_REINIT_FROM_PEER_CNT] =
4229         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4230                          CNTR_SYNTH),
4231 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4232                                   CNTR_SYNTH),
4233 [C_DC_MISC_FLG_CNT] =
4234         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4235                          CNTR_SYNTH),
4236 [C_DC_PRF_GOOD_LTP_CNT] =
4237         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4238 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4239         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4240                          CNTR_SYNTH),
4241 [C_DC_PRF_RX_FLIT_CNT] =
4242         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4243 [C_DC_PRF_TX_FLIT_CNT] =
4244         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4245 [C_DC_PRF_CLK_CNTR] =
4246         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4247 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4248         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4249 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4250         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4251                          CNTR_SYNTH),
4252 [C_DC_PG_STS_TX_SBE_CNT] =
4253         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4254 [C_DC_PG_STS_TX_MBE_CNT] =
4255         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4256                          CNTR_SYNTH),
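/* Software device counters: no CSR, values come from access_sw_* handlers */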
4257 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4258                             access_sw_cpu_intr),
4259 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4260                             access_sw_cpu_rcv_limit),
4261 [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
4262                             access_sw_ctx0_seq_drop),
4263 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4264                             access_sw_vtx_wait),
4265 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4266                             access_sw_pio_wait),
4267 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4268                             access_sw_pio_drain),
4269 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4270                             access_sw_kmem_wait),
4271 [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
4272                             hfi1_access_sw_tid_wait),
4273 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4274                             access_sw_send_schedule),
4275 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4276                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4277                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4278                                       dev_access_u32_csr),
4279 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4280                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4281                              access_sde_int_cnt),
4282 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4283                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4284                              access_sde_err_cnt),
4285 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4286                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4287                                   access_sde_idle_int_cnt),
4288 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4289                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4290                                       access_sde_progress_int_cnt),
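/*
 * The rest of the table holds per-bit error counters.  Each group
 * mirrors one of the chip's error status registers, named in the
 * comment that introduces it (MISC_ERR_STATUS, CceErrStatus, ...), with
 * one access_*_err_cnt() handler per status bit.
 */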
4291 /* MISC_ERR_STATUS */
4292 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4293                                 CNTR_NORMAL,
4294                                 access_misc_pll_lock_fail_err_cnt),
4295 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4296                                 CNTR_NORMAL,
4297                                 access_misc_mbist_fail_err_cnt),
4298 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4299                                 CNTR_NORMAL,
4300                                 access_misc_invalid_eep_cmd_err_cnt),
4301 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4302                                 CNTR_NORMAL,
4303                                 access_misc_efuse_done_parity_err_cnt),
4304 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4305                                 CNTR_NORMAL,
4306                                 access_misc_efuse_write_err_cnt),
4307 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4308                                 0, CNTR_NORMAL,
4309                                 access_misc_efuse_read_bad_addr_err_cnt),
4310 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4311                                 CNTR_NORMAL,
4312                                 access_misc_efuse_csr_parity_err_cnt),
4313 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4314                                 CNTR_NORMAL,
4315                                 access_misc_fw_auth_failed_err_cnt),
4316 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4317                                 CNTR_NORMAL,
4318                                 access_misc_key_mismatch_err_cnt),
4319 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4320                                 CNTR_NORMAL,
4321                                 access_misc_sbus_write_failed_err_cnt),
4322 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4323                                 CNTR_NORMAL,
4324                                 access_misc_csr_write_bad_addr_err_cnt),
4325 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4326                                 CNTR_NORMAL,
4327                                 access_misc_csr_read_bad_addr_err_cnt),
4328 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4329                                 CNTR_NORMAL,
4330                                 access_misc_csr_parity_err_cnt),
4331 /* CceErrStatus */
4332 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4333                                 CNTR_NORMAL,
4334                                 access_sw_cce_err_status_aggregated_cnt),
4335 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4336                                 CNTR_NORMAL,
4337                                 access_cce_msix_csr_parity_err_cnt),
4338 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4339                                 CNTR_NORMAL,
4340                                 access_cce_int_map_unc_err_cnt),
4341 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4342                                 CNTR_NORMAL,
4343                                 access_cce_int_map_cor_err_cnt),
4344 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4345                                 CNTR_NORMAL,
4346                                 access_cce_msix_table_unc_err_cnt),
4347 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4348                                 CNTR_NORMAL,
4349                                 access_cce_msix_table_cor_err_cnt),
4350 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4351                                 0, CNTR_NORMAL,
4352                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4353 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4354                                 0, CNTR_NORMAL,
4355                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4356 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4357                                 CNTR_NORMAL,
4358                                 access_cce_seg_write_bad_addr_err_cnt),
4359 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4360                                 CNTR_NORMAL,
4361                                 access_cce_seg_read_bad_addr_err_cnt),
4362 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4363                                 CNTR_NORMAL,
4364                                 access_la_triggered_cnt),
4365 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4366                                 CNTR_NORMAL,
4367                                 access_cce_trgt_cpl_timeout_err_cnt),
4368 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4369                                 CNTR_NORMAL,
4370                                 access_pcic_receive_parity_err_cnt),
4371 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4372                                 CNTR_NORMAL,
4373                                 access_pcic_transmit_back_parity_err_cnt),
4374 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4375                                 0, CNTR_NORMAL,
4376                                 access_pcic_transmit_front_parity_err_cnt),
4377 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4378                                 CNTR_NORMAL,
4379                                 access_pcic_cpl_dat_q_unc_err_cnt),
4380 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4381                                 CNTR_NORMAL,
4382                                 access_pcic_cpl_hd_q_unc_err_cnt),
4383 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4384                                 CNTR_NORMAL,
4385                                 access_pcic_post_dat_q_unc_err_cnt),
4386 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4387                                 CNTR_NORMAL,
4388                                 access_pcic_post_hd_q_unc_err_cnt),
4389 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4390                                 CNTR_NORMAL,
4391                                 access_pcic_retry_sot_mem_unc_err_cnt),
4392 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4393                                 CNTR_NORMAL,
4394                                 access_pcic_retry_mem_unc_err),
4395 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4396                                 CNTR_NORMAL,
4397                                 access_pcic_n_post_dat_q_parity_err_cnt),
4398 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4399                                 CNTR_NORMAL,
4400                                 access_pcic_n_post_h_q_parity_err_cnt),
4401 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4402                                 CNTR_NORMAL,
4403                                 access_pcic_cpl_dat_q_cor_err_cnt),
4404 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4405                                 CNTR_NORMAL,
4406                                 access_pcic_cpl_hd_q_cor_err_cnt),
4407 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4408                                 CNTR_NORMAL,
4409                                 access_pcic_post_dat_q_cor_err_cnt),
4410 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4411                                 CNTR_NORMAL,
4412                                 access_pcic_post_hd_q_cor_err_cnt),
4413 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4414                                 CNTR_NORMAL,
4415                                 access_pcic_retry_sot_mem_cor_err_cnt),
4416 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4417                                 CNTR_NORMAL,
4418                                 access_pcic_retry_mem_cor_err_cnt),
4419 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4420                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4421                                 CNTR_NORMAL,
4422                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4423 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4424                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4425                                 CNTR_NORMAL,
4426                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4427                                 ),
4428 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4429                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4430                         CNTR_NORMAL,
4431                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4432 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4433                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4434                         CNTR_NORMAL,
4435                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4436 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4437                         0, CNTR_NORMAL,
4438                         access_cce_cli2_async_fifo_parity_err_cnt),
4439 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4440                         CNTR_NORMAL,
4441                         access_cce_csr_cfg_bus_parity_err_cnt),
4442 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4443                         0, CNTR_NORMAL,
4444                         access_cce_cli0_async_fifo_parity_err_cnt),
4445 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4446                         CNTR_NORMAL,
4447                         access_cce_rspd_data_parity_err_cnt),
4448 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4449                         CNTR_NORMAL,
4450                         access_cce_trgt_access_err_cnt),
4451 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4452                         0, CNTR_NORMAL,
4453                         access_cce_trgt_async_fifo_parity_err_cnt),
4454 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4455                         CNTR_NORMAL,
4456                         access_cce_csr_write_bad_addr_err_cnt),
4457 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4458                         CNTR_NORMAL,
4459                         access_cce_csr_read_bad_addr_err_cnt),
4460 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4461                         CNTR_NORMAL,
4462                         access_ccs_csr_parity_err_cnt),
4463
4464 /* RcvErrStatus */
4465 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4466                         CNTR_NORMAL,
4467                         access_rx_csr_parity_err_cnt),
4468 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4469                         CNTR_NORMAL,
4470                         access_rx_csr_write_bad_addr_err_cnt),
4471 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4472                         CNTR_NORMAL,
4473                         access_rx_csr_read_bad_addr_err_cnt),
4474 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4475                         CNTR_NORMAL,
4476                         access_rx_dma_csr_unc_err_cnt),
4477 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4478                         CNTR_NORMAL,
4479                         access_rx_dma_dq_fsm_encoding_err_cnt),
4480 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4481                         CNTR_NORMAL,
4482                         access_rx_dma_eq_fsm_encoding_err_cnt),
4483 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4484                         CNTR_NORMAL,
4485                         access_rx_dma_csr_parity_err_cnt),
4486 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4487                         CNTR_NORMAL,
4488                         access_rx_rbuf_data_cor_err_cnt),
4489 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4490                         CNTR_NORMAL,
4491                         access_rx_rbuf_data_unc_err_cnt),
4492 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4493                         CNTR_NORMAL,
4494                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4495 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4496                         CNTR_NORMAL,
4497                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4498 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4499                         CNTR_NORMAL,
4500                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4501 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4502                         CNTR_NORMAL,
4503                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4504 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4505                         CNTR_NORMAL,
4506                         access_rx_rbuf_desc_part2_cor_err_cnt),
4507 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4508                         CNTR_NORMAL,
4509                         access_rx_rbuf_desc_part2_unc_err_cnt),
4510 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4511                         CNTR_NORMAL,
4512                         access_rx_rbuf_desc_part1_cor_err_cnt),
4513 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4514                         CNTR_NORMAL,
4515                         access_rx_rbuf_desc_part1_unc_err_cnt),
4516 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4517                         CNTR_NORMAL,
4518                         access_rx_hq_intr_fsm_err_cnt),
4519 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4520                         CNTR_NORMAL,
4521                         access_rx_hq_intr_csr_parity_err_cnt),
4522 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4523                         CNTR_NORMAL,
4524                         access_rx_lookup_csr_parity_err_cnt),
4525 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4526                         CNTR_NORMAL,
4527                         access_rx_lookup_rcv_array_cor_err_cnt),
4528 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4529                         CNTR_NORMAL,
4530                         access_rx_lookup_rcv_array_unc_err_cnt),
4531 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4532                         0, CNTR_NORMAL,
4533                         access_rx_lookup_des_part2_parity_err_cnt),
4534 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4535                         0, CNTR_NORMAL,
4536                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4537 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4538                         CNTR_NORMAL,
4539                         access_rx_lookup_des_part1_unc_err_cnt),
4540 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4541                         CNTR_NORMAL,
4542                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4543 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4544                         CNTR_NORMAL,
4545                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4546 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4547                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4548                         CNTR_NORMAL,
4549                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4550 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4551                         0, CNTR_NORMAL,
4552                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4553 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4554                         0, CNTR_NORMAL,
4555                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4556 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4557                         CNTR_NORMAL,
4558                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4559 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4560                         CNTR_NORMAL,
4561                         access_rx_rbuf_empty_err_cnt),
4562 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4563                         CNTR_NORMAL,
4564                         access_rx_rbuf_full_err_cnt),
4565 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4566                         CNTR_NORMAL,
4567                         access_rbuf_bad_lookup_err_cnt),
4568 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4569                         CNTR_NORMAL,
4570                         access_rbuf_ctx_id_parity_err_cnt),
4571 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4572                         CNTR_NORMAL,
4573                         access_rbuf_csr_qeopdw_parity_err_cnt),
4574 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4575                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4576                         CNTR_NORMAL,
4577                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4578 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4579                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4580                         CNTR_NORMAL,
4581                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4582 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4583                         0, CNTR_NORMAL,
4584                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4585 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4586                         0, CNTR_NORMAL,
4587                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4588 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4589                         0, 0, CNTR_NORMAL,
4590                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4591 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4592                         0, CNTR_NORMAL,
4593                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4594 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4595                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4596                         CNTR_NORMAL,
4597                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4598 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4599                         0, CNTR_NORMAL,
4600                         access_rx_rbuf_block_list_read_cor_err_cnt),
4601 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4602                         0, CNTR_NORMAL,
4603                         access_rx_rbuf_block_list_read_unc_err_cnt),
4604 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4605                         CNTR_NORMAL,
4606                         access_rx_rbuf_lookup_des_cor_err_cnt),
4607 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4608                         CNTR_NORMAL,
4609                         access_rx_rbuf_lookup_des_unc_err_cnt),
4610 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4611                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4612                         CNTR_NORMAL,
4613                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4614 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4615                         CNTR_NORMAL,
4616                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4617 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4618                         CNTR_NORMAL,
4619                         access_rx_rbuf_free_list_cor_err_cnt),
4620 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4621                         CNTR_NORMAL,
4622                         access_rx_rbuf_free_list_unc_err_cnt),
4623 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4624                         CNTR_NORMAL,
4625                         access_rx_rcv_fsm_encoding_err_cnt),
4626 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4627                         CNTR_NORMAL,
4628                         access_rx_dma_flag_cor_err_cnt),
4629 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4630                         CNTR_NORMAL,
4631                         access_rx_dma_flag_unc_err_cnt),
4632 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4633                         CNTR_NORMAL,
4634                         access_rx_dc_sop_eop_parity_err_cnt),
4635 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4636                         CNTR_NORMAL,
4637                         access_rx_rcv_csr_parity_err_cnt),
4638 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4639                         CNTR_NORMAL,
4640                         access_rx_rcv_qp_map_table_cor_err_cnt),
4641 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4642                         CNTR_NORMAL,
4643                         access_rx_rcv_qp_map_table_unc_err_cnt),
4644 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4645                         CNTR_NORMAL,
4646                         access_rx_rcv_data_cor_err_cnt),
4647 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4648                         CNTR_NORMAL,
4649                         access_rx_rcv_data_unc_err_cnt),
4650 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4651                         CNTR_NORMAL,
4652                         access_rx_rcv_hdr_cor_err_cnt),
4653 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4654                         CNTR_NORMAL,
4655                         access_rx_rcv_hdr_unc_err_cnt),
4656 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4657                         CNTR_NORMAL,
4658                         access_rx_dc_intf_parity_err_cnt),
4659 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4660                         CNTR_NORMAL,
4661                         access_rx_dma_csr_cor_err_cnt),
4662 /* SendPioErrStatus */
4663 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4664                         CNTR_NORMAL,
4665                         access_pio_pec_sop_head_parity_err_cnt),
4666 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4667                         CNTR_NORMAL,
4668                         access_pio_pcc_sop_head_parity_err_cnt),
4669 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4670                         0, 0, CNTR_NORMAL,
4671                         access_pio_last_returned_cnt_parity_err_cnt),
4672 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4673                         0, CNTR_NORMAL,
4674                         access_pio_current_free_cnt_parity_err_cnt),
4675 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4676                         CNTR_NORMAL,
4677                         access_pio_reserved_31_err_cnt),
4678 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4679                         CNTR_NORMAL,
4680                         access_pio_reserved_30_err_cnt),
4681 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4682                         CNTR_NORMAL,
4683                         access_pio_ppmc_sop_len_err_cnt),
4684 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4685                         CNTR_NORMAL,
4686                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4687 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4688                         CNTR_NORMAL,
4689                         access_pio_vl_fifo_parity_err_cnt),
4690 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4691                         CNTR_NORMAL,
4692                         access_pio_vlf_sop_parity_err_cnt),
4693 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4694                         CNTR_NORMAL,
4695                         access_pio_vlf_v1_len_parity_err_cnt),
4696 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4697                         CNTR_NORMAL,
4698                         access_pio_block_qw_count_parity_err_cnt),
4699 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4700                         CNTR_NORMAL,
4701                         access_pio_write_qw_valid_parity_err_cnt),
4702 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4703                         CNTR_NORMAL,
4704                         access_pio_state_machine_err_cnt),
4705 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4706                         CNTR_NORMAL,
4707                         access_pio_write_data_parity_err_cnt),
4708 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4709                         CNTR_NORMAL,
4710                         access_pio_host_addr_mem_cor_err_cnt),
4711 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4712                         CNTR_NORMAL,
4713                         access_pio_host_addr_mem_unc_err_cnt),
4714 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4715                         CNTR_NORMAL,
4716                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4717 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4718                         CNTR_NORMAL,
4719                         access_pio_init_sm_in_err_cnt),
4720 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4721                         CNTR_NORMAL,
4722                         access_pio_ppmc_pbl_fifo_err_cnt),
4723 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4724                         0, CNTR_NORMAL,
4725                         access_pio_credit_ret_fifo_parity_err_cnt),
4726 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4727                         CNTR_NORMAL,
4728                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4729 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4730                         CNTR_NORMAL,
4731                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4732 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4733                         CNTR_NORMAL,
4734                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4735 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4736                         CNTR_NORMAL,
4737                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4738 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4739                         CNTR_NORMAL,
4740                         access_pio_sm_pkt_reset_parity_err_cnt),
4741 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4742                         CNTR_NORMAL,
4743                         access_pio_pkt_evict_fifo_parity_err_cnt),
4744 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4745                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4746                         CNTR_NORMAL,
4747                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4748 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4749                         CNTR_NORMAL,
4750                         access_pio_sbrdctl_crrel_parity_err_cnt),
4751 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4752                         CNTR_NORMAL,
4753                         access_pio_pec_fifo_parity_err_cnt),
4754 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4755                         CNTR_NORMAL,
4756                         access_pio_pcc_fifo_parity_err_cnt),
4757 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4758                         CNTR_NORMAL,
4759                         access_pio_sb_mem_fifo1_err_cnt),
4760 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4761                         CNTR_NORMAL,
4762                         access_pio_sb_mem_fifo0_err_cnt),
4763 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4764                         CNTR_NORMAL,
4765                         access_pio_csr_parity_err_cnt),
4766 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4767                         CNTR_NORMAL,
4768                         access_pio_write_addr_parity_err_cnt),
4769 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4770                         CNTR_NORMAL,
4771                         access_pio_write_bad_ctxt_err_cnt),
4772 /* SendDmaErrStatus */
4773 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4774                         0, CNTR_NORMAL,
4775                         access_sdma_pcie_req_tracking_cor_err_cnt),
4776 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4777                         0, CNTR_NORMAL,
4778                         access_sdma_pcie_req_tracking_unc_err_cnt),
4779 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4780                         CNTR_NORMAL,
4781                         access_sdma_csr_parity_err_cnt),
4782 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4783                         CNTR_NORMAL,
4784                         access_sdma_rpy_tag_err_cnt),
4785 /* SendEgressErrStatus */
4786 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4787                         CNTR_NORMAL,
4788                         access_tx_read_pio_memory_csr_unc_err_cnt),
4789 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4790                         0, CNTR_NORMAL,
4791                         access_tx_read_sdma_memory_csr_err_cnt),
4792 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4793                         CNTR_NORMAL,
4794                         access_tx_egress_fifo_cor_err_cnt),
4795 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4796                         CNTR_NORMAL,
4797                         access_tx_read_pio_memory_cor_err_cnt),
4798 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4799                         CNTR_NORMAL,
4800                         access_tx_read_sdma_memory_cor_err_cnt),
4801 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4802                         CNTR_NORMAL,
4803                         access_tx_sb_hdr_cor_err_cnt),
4804 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4805                         CNTR_NORMAL,
4806                         access_tx_credit_overrun_err_cnt),
4807 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4808                         CNTR_NORMAL,
4809                         access_tx_launch_fifo8_cor_err_cnt),
4810 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4811                         CNTR_NORMAL,
4812                         access_tx_launch_fifo7_cor_err_cnt),
4813 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4814                         CNTR_NORMAL,
4815                         access_tx_launch_fifo6_cor_err_cnt),
4816 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4817                         CNTR_NORMAL,
4818                         access_tx_launch_fifo5_cor_err_cnt),
4819 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4820                         CNTR_NORMAL,
4821                         access_tx_launch_fifo4_cor_err_cnt),
4822 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4823                         CNTR_NORMAL,
4824                         access_tx_launch_fifo3_cor_err_cnt),
4825 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4826                         CNTR_NORMAL,
4827                         access_tx_launch_fifo2_cor_err_cnt),
4828 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4829                         CNTR_NORMAL,
4830                         access_tx_launch_fifo1_cor_err_cnt),
4831 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4832                         CNTR_NORMAL,
4833                         access_tx_launch_fifo0_cor_err_cnt),
4834 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4835                         CNTR_NORMAL,
4836                         access_tx_credit_return_vl_err_cnt),
4837 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4838                         CNTR_NORMAL,
4839                         access_tx_hcrc_insertion_err_cnt),
4840 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4841                         CNTR_NORMAL,
4842                         access_tx_egress_fifo_unc_err_cnt),
4843 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4844                         CNTR_NORMAL,
4845                         access_tx_read_pio_memory_unc_err_cnt),
4846 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4847                         CNTR_NORMAL,
4848                         access_tx_read_sdma_memory_unc_err_cnt),
4849 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4850                         CNTR_NORMAL,
4851                         access_tx_sb_hdr_unc_err_cnt),
4852 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4853                         CNTR_NORMAL,
4854                         access_tx_credit_return_partiy_err_cnt),
4855 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4856                         0, 0, CNTR_NORMAL,
4857                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4858 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4859                         0, 0, CNTR_NORMAL,
4860                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4861 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4862                         0, 0, CNTR_NORMAL,
4863                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4864 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4865                         0, 0, CNTR_NORMAL,
4866                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4867 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4868                         0, 0, CNTR_NORMAL,
4869                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4870 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4871                         0, 0, CNTR_NORMAL,
4872                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4873 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4874                         0, 0, CNTR_NORMAL,
4875                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4876 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4877                         0, 0, CNTR_NORMAL,
4878                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4879 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4880                         0, 0, CNTR_NORMAL,
4881                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4882 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4883                         0, 0, CNTR_NORMAL,
4884                         access_tx_sdma15_disallowed_packet_err_cnt),
4885 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4886                         0, 0, CNTR_NORMAL,
4887                         access_tx_sdma14_disallowed_packet_err_cnt),
4888 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4889                         0, 0, CNTR_NORMAL,
4890                         access_tx_sdma13_disallowed_packet_err_cnt),
4891 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4892                         0, 0, CNTR_NORMAL,
4893                         access_tx_sdma12_disallowed_packet_err_cnt),
4894 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4895                         0, 0, CNTR_NORMAL,
4896                         access_tx_sdma11_disallowed_packet_err_cnt),
4897 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4898                         0, 0, CNTR_NORMAL,
4899                         access_tx_sdma10_disallowed_packet_err_cnt),
4900 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4901                         0, 0, CNTR_NORMAL,
4902                         access_tx_sdma9_disallowed_packet_err_cnt),
4903 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4904                         0, 0, CNTR_NORMAL,
4905                         access_tx_sdma8_disallowed_packet_err_cnt),
4906 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4907                         0, 0, CNTR_NORMAL,
4908                         access_tx_sdma7_disallowed_packet_err_cnt),
4909 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4910                         0, 0, CNTR_NORMAL,
4911                         access_tx_sdma6_disallowed_packet_err_cnt),
4912 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4913                         0, 0, CNTR_NORMAL,
4914                         access_tx_sdma5_disallowed_packet_err_cnt),
4915 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4916                         0, 0, CNTR_NORMAL,
4917                         access_tx_sdma4_disallowed_packet_err_cnt),
4918 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4919                         0, 0, CNTR_NORMAL,
4920                         access_tx_sdma3_disallowed_packet_err_cnt),
4921 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4922                         0, 0, CNTR_NORMAL,
4923                         access_tx_sdma2_disallowed_packet_err_cnt),
4924 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4925                         0, 0, CNTR_NORMAL,
4926                         access_tx_sdma1_disallowed_packet_err_cnt),
4927 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4928                         0, 0, CNTR_NORMAL,
4929                         access_tx_sdma0_disallowed_packet_err_cnt),
4930 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4931                         CNTR_NORMAL,
4932                         access_tx_config_parity_err_cnt),
4933 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4934                         CNTR_NORMAL,
4935                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4936 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4937                         CNTR_NORMAL,
4938                         access_tx_launch_csr_parity_err_cnt),
4939 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4940                         CNTR_NORMAL,
4941                         access_tx_illegal_vl_err_cnt),
4942 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4943                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4944                         CNTR_NORMAL,
4945                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4946 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4947                         CNTR_NORMAL,
4948                         access_egress_reserved_10_err_cnt),
4949 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4950                         CNTR_NORMAL,
4951                         access_egress_reserved_9_err_cnt),
4952 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4953                         0, 0, CNTR_NORMAL,
4954                         access_tx_sdma_launch_intf_parity_err_cnt),
4955 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4956                         CNTR_NORMAL,
4957                         access_tx_pio_launch_intf_parity_err_cnt),
4958 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4959                         CNTR_NORMAL,
4960                         access_egress_reserved_6_err_cnt),
4961 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4962                         CNTR_NORMAL,
4963                         access_tx_incorrect_link_state_err_cnt),
4964 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4965                         CNTR_NORMAL,
4966                         access_tx_linkdown_err_cnt),
4967 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4968                         "EgressFifoUnderrunOrParityErr", 0, 0,
4969                         CNTR_NORMAL,
4970                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4971 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4972                         CNTR_NORMAL,
4973                         access_egress_reserved_2_err_cnt),
4974 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4975                         CNTR_NORMAL,
4976                         access_tx_pkt_integrity_mem_unc_err_cnt),
4977 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4978                         CNTR_NORMAL,
4979                         access_tx_pkt_integrity_mem_cor_err_cnt),
4980 /* SendErrStatus */
4981 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4982                         CNTR_NORMAL,
4983                         access_send_csr_write_bad_addr_err_cnt),
4984 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4985                         CNTR_NORMAL,
4986                         access_send_csr_read_bad_addr_err_cnt),
4987 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4988                         CNTR_NORMAL,
4989                         access_send_csr_parity_cnt),
4990 /* SendCtxtErrStatus */
4991 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4992                         CNTR_NORMAL,
4993                         access_pio_write_out_of_bounds_err_cnt),
4994 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4995                         CNTR_NORMAL,
4996                         access_pio_write_overflow_err_cnt),
4997 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4998                         0, 0, CNTR_NORMAL,
4999                         access_pio_write_crosses_boundary_err_cnt),
5000 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
5001                         CNTR_NORMAL,
5002                         access_pio_disallowed_packet_err_cnt),
5003 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
5004                         CNTR_NORMAL,
5005                         access_pio_inconsistent_sop_err_cnt),
5006 /* SendDmaEngErrStatus */
5007 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
5008                         0, 0, CNTR_NORMAL,
5009                         access_sdma_header_request_fifo_cor_err_cnt),
5010 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
5011                         CNTR_NORMAL,
5012                         access_sdma_header_storage_cor_err_cnt),
5013 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
5014                         CNTR_NORMAL,
5015                         access_sdma_packet_tracking_cor_err_cnt),
5016 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5017                         CNTR_NORMAL,
5018                         access_sdma_assembly_cor_err_cnt),
5019 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5020                         CNTR_NORMAL,
5021                         access_sdma_desc_table_cor_err_cnt),
5022 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5023                         0, 0, CNTR_NORMAL,
5024                         access_sdma_header_request_fifo_unc_err_cnt),
5025 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5026                         CNTR_NORMAL,
5027                         access_sdma_header_storage_unc_err_cnt),
5028 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5029                         CNTR_NORMAL,
5030                         access_sdma_packet_tracking_unc_err_cnt),
5031 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5032                         CNTR_NORMAL,
5033                         access_sdma_assembly_unc_err_cnt),
5034 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5035                         CNTR_NORMAL,
5036                         access_sdma_desc_table_unc_err_cnt),
5037 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5038                         CNTR_NORMAL,
5039                         access_sdma_timeout_err_cnt),
5040 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5041                         CNTR_NORMAL,
5042                         access_sdma_header_length_err_cnt),
5043 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5044                         CNTR_NORMAL,
5045                         access_sdma_header_address_err_cnt),
5046 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5047                         CNTR_NORMAL,
5048                         access_sdma_header_select_err_cnt),
5049 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5050                         CNTR_NORMAL,
5051                         access_sdma_reserved_9_err_cnt),
5052 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5053                         CNTR_NORMAL,
5054                         access_sdma_packet_desc_overflow_err_cnt),
5055 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5056                         CNTR_NORMAL,
5057                         access_sdma_length_mismatch_err_cnt),
5058 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5059                         CNTR_NORMAL,
5060                         access_sdma_halt_err_cnt),
5061 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5062                         CNTR_NORMAL,
5063                         access_sdma_mem_read_err_cnt),
5064 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5065                         CNTR_NORMAL,
5066                         access_sdma_first_desc_err_cnt),
5067 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5068                         CNTR_NORMAL,
5069                         access_sdma_tail_out_of_bounds_err_cnt),
5070 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5071                         CNTR_NORMAL,
5072                         access_sdma_too_long_err_cnt),
5073 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5074                         CNTR_NORMAL,
5075                         access_sdma_gen_mismatch_err_cnt),
5076 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5077                         CNTR_NORMAL,
5078                         access_sdma_wrong_dw_err_cnt),
5079 };
5080
5081 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5082 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5083                         CNTR_NORMAL),
5084 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5085                         CNTR_NORMAL),
5086 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5087                         CNTR_NORMAL),
5088 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5089                         CNTR_NORMAL),
5090 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5091                         CNTR_NORMAL),
5092 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5093                         CNTR_NORMAL),
5094 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5095                         CNTR_NORMAL),
5096 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5097 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5098 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5099 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5100                                       CNTR_SYNTH | CNTR_VL),
5101 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5102                                      CNTR_SYNTH | CNTR_VL),
5103 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5104                                       CNTR_SYNTH | CNTR_VL),
5105 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5106 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5107 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5108                              access_sw_link_dn_cnt),
5109 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5110                            access_sw_link_up_cnt),
5111 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5112                                  access_sw_unknown_frame_cnt),
5113 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5114                              access_sw_xmit_discards),
5115 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5116                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5117                                 access_sw_xmit_discards),
5118 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5119                                  access_xmit_constraint_errs),
5120 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5121                                 access_rcv_constraint_errs),
5122 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5123 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5124 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5125 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5126 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5127 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5128 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5129 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5130 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5131 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5132 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5133 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5134 [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
5135 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5136                                access_sw_cpu_rc_acks),
5137 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5138                                 access_sw_cpu_rc_qacks),
5139 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5140                                        access_sw_cpu_rc_delayed_comp),
5141 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5142 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5143 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5144 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5145 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5146 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5147 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5148 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5149 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5150 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5151 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5152 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5153 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5154 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5155 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5156 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5157 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5158 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5159 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5160 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5161 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5162 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5163 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5164 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5165 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5166 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5167 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5168 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5169 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5170 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5171 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5172 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5173 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5174 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5175 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5176 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5177 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5178 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5179 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5180 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5181 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5182 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5183 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5184 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5185 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5186 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5187 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5188 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5189 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5190 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5191 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5192 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5193 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5194 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5195 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5196 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5197 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5198 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5199 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5200 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5201 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5202 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5203 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5204 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5205 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5206 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5207 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5208 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5209 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5210 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5211 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5212 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5213 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5214 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5215 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5216 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5217 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5218 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5219 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5220 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5221 };
5222
5223 /* ======================================================================== */
5224
5225 /* return true if this is chip revision A */
5226 int is_ax(struct hfi1_devdata *dd)
5227 {
5228         u8 chip_rev_minor =
5229                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5230                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5231         return (chip_rev_minor & 0xf0) == 0;
5232 }
5233
5234 /* return true if this is chip revision B */
5235 int is_bx(struct hfi1_devdata *dd)
5236 {
5237         u8 chip_rev_minor =
5238                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5239                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5240         return (chip_rev_minor & 0xF0) == 0x10;
5241 }
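
/*
 * Example: these helpers key off the upper nibble of the minor chip
 * revision, so 0x0n identifies an A-step part and 0x1n a B-step part.
 * A hedged usage sketch (apply_a0_workarounds() is a hypothetical
 * helper used only for illustration):
 *
 *      if (is_ax(dd))
 *              apply_a0_workarounds(dd);
 *      else if (is_bx(dd))
 *              dd_dev_info(dd, "B-step silicon\n");
 */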
5242
5243 /* return true if the kernel urgent interrupt is masked for rcd */
5244 bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5245 {
5246         u64 mask;
5247         u32 is = IS_RCVURGENT_START + rcd->ctxt;
5248         u8 bit = is % 64;
5249
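        /*
         * Each CCE_INT_MASK CSR is 8 bytes wide and covers 64 interrupt
         * sources, so is / 64 selects the mask register and is % 64
         * selects the bit within it.
         */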
5250         mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5251         return !(mask & BIT_ULL(bit));
5252 }
5253
5254 /*
5255  * Append string s to buffer buf.  Arguments curp and len are the current
5256  * position and remaining length, respectively.
5257  *
5258  * return 0 on success, 1 on out of room
5259  */
5260 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5261 {
5262         char *p = *curp;
5263         int len = *lenp;
5264         int result = 0; /* success */
5265         char c;
5266
5267         /* add a comma if this is not the first entry in the buffer */
5268         if (p != buf) {
5269                 if (len == 0) {
5270                         result = 1; /* out of room */
5271                         goto done;
5272                 }
5273                 *p++ = ',';
5274                 len--;
5275         }
5276
5277         /* copy the string */
5278         while ((c = *s++) != 0) {
5279                 if (len == 0) {
5280                         result = 1; /* out of room */
5281                         goto done;
5282                 }
5283                 *p++ = c;
5284                 len--;
5285         }
5286
5287 done:
5288         /* write return values */
5289         *curp = p;
5290         *lenp = len;
5291
5292         return result;
5293 }
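
/*
 * Usage sketch (illustrative): callers keep a moving cursor and a
 * remaining-length count across calls, as flag_string() below does:
 *
 *      char *p = buf;
 *      int len = buf_len - 1;          (reserve room for the nul)
 *      append_str(buf, &p, &len, "SomeFlag");
 *      append_str(buf, &p, &len, "OtherFlag");
 *
 * "SomeFlag" and "OtherFlag" are made-up strings used only to show the
 * in/out behavior of curp and lenp.
 */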
5294
5295 /*
5296  * Using the given flag table, print a comma separated string into
5297  * the buffer.  End in '*' if the buffer is too short.
5298  */
5299 static char *flag_string(char *buf, int buf_len, u64 flags,
5300                          struct flag_table *table, int table_size)
5301 {
5302         char extra[32];
5303         char *p = buf;
5304         int len = buf_len;
5305         int no_room = 0;
5306         int i;
5307
5308         /* make sure there are at least 2 bytes so we can form "*" */
5309         if (len < 2)
5310                 return "";
5311
5312         len--;  /* leave room for a nul */
5313         for (i = 0; i < table_size; i++) {
5314                 if (flags & table[i].flag) {
5315                         no_room = append_str(buf, &p, &len, table[i].str);
5316                         if (no_room)
5317                                 break;
5318                         flags &= ~table[i].flag;
5319                 }
5320         }
5321
5322         /* any undocumented bits left? */
5323         if (!no_room && flags) {
5324                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5325                 no_room = append_str(buf, &p, &len, extra);
5326         }
5327
5328         /* add '*' if we ran out of room */
5329         if (no_room) {
5330                 /* may need to back up to add space for a '*' */
5331                 if (len == 0)
5332                         --p;
5333                 *p++ = '*';
5334         }
5335
5336         /* add final nul - space already allocated above */
5337         *p = 0;
5338         return buf;
5339 }
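
/*
 * For illustration, with hypothetical table entries "FlagA" (bit 0)
 * and "FlagB" (bit 1): a flags value of 0x103 would be rendered as
 * "FlagA,FlagB,bits 0x100", and a '*' is appended instead when the
 * buffer runs out of room.
 */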
5340
5341 /* first 8 CCE error interrupt source names */
5342 static const char * const cce_misc_names[] = {
5343         "CceErrInt",            /* 0 */
5344         "RxeErrInt",            /* 1 */
5345         "MiscErrInt",           /* 2 */
5346         "Reserved3",            /* 3 */
5347         "PioErrInt",            /* 4 */
5348         "SDmaErrInt",           /* 5 */
5349         "EgressErrInt",         /* 6 */
5350         "TxeErrInt"             /* 7 */
5351 };
5352
5353 /*
5354  * Return the miscellaneous error interrupt name.
5355  */
5356 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5357 {
5358         if (source < ARRAY_SIZE(cce_misc_names))
5359                 strncpy(buf, cce_misc_names[source], bsize);
5360         else
5361                 snprintf(buf, bsize, "Reserved%u",
5362                          source + IS_GENERAL_ERR_START);
5363
5364         return buf;
5365 }
5366
5367 /*
5368  * Return the SDMA engine error interrupt name.
5369  */
5370 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5371 {
5372         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5373         return buf;
5374 }
5375
5376 /*
5377  * Return the send context error interrupt name.
5378  */
5379 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5380 {
5381         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5382         return buf;
5383 }
5384
5385 static const char * const various_names[] = {
5386         "PbcInt",
5387         "GpioAssertInt",
5388         "Qsfp1Int",
5389         "Qsfp2Int",
5390         "TCritInt"
5391 };
5392
5393 /*
5394  * Return the various interrupt name.
5395  */
5396 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5397 {
5398         if (source < ARRAY_SIZE(various_names))
5399                 strncpy(buf, various_names[source], bsize);
5400         else
5401                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5402         return buf;
5403 }
5404
5405 /*
5406  * Return the DC interrupt name.
5407  */
5408 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5409 {
5410         static const char * const dc_int_names[] = {
5411                 "common",
5412                 "lcb",
5413                 "8051",
5414                 "lbm"   /* local block merge */
5415         };
5416
5417         if (source < ARRAY_SIZE(dc_int_names))
5418                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5419         else
5420                 snprintf(buf, bsize, "DCInt%u", source);
5421         return buf;
5422 }
5423
5424 static const char * const sdma_int_names[] = {
5425         "SDmaInt",
5426         "SdmaIdleInt",
5427         "SdmaProgressInt",
5428 };
5429
5430 /*
5431  * Return the SDMA engine interrupt name.
5432  */
5433 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5434 {
5435         /* what interrupt */
5436         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5437         /* which engine */
5438         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5439
5440         if (likely(what < 3))
5441                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5442         else
5443                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5444         return buf;
5445 }
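
/*
 * Worked example, assuming TXE_NUM_SDMA_ENGINES is 16: source 17 gives
 * what = 1 and which = 1, so the name produced is "SdmaIdleInt1".
 */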
5446
5447 /*
5448  * Return the receive available interrupt name.
5449  */
5450 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5451 {
5452         snprintf(buf, bsize, "RcvAvailInt%u", source);
5453         return buf;
5454 }
5455
5456 /*
5457  * Return the receive urgent interrupt name.
5458  */
5459 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5460 {
5461         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5462         return buf;
5463 }
5464
5465 /*
5466  * Return the send credit interrupt name.
5467  */
5468 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5469 {
5470         snprintf(buf, bsize, "SendCreditInt%u", source);
5471         return buf;
5472 }
5473
5474 /*
5475  * Return the reserved interrupt name.
5476  */
5477 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5478 {
5479         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5480         return buf;
5481 }
5482
5483 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5484 {
5485         return flag_string(buf, buf_len, flags,
5486                            cce_err_status_flags,
5487                            ARRAY_SIZE(cce_err_status_flags));
5488 }
5489
5490 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5491 {
5492         return flag_string(buf, buf_len, flags,
5493                            rxe_err_status_flags,
5494                            ARRAY_SIZE(rxe_err_status_flags));
5495 }
5496
5497 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5498 {
5499         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5500                            ARRAY_SIZE(misc_err_status_flags));
5501 }
5502
5503 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5504 {
5505         return flag_string(buf, buf_len, flags,
5506                            pio_err_status_flags,
5507                            ARRAY_SIZE(pio_err_status_flags));
5508 }
5509
5510 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5511 {
5512         return flag_string(buf, buf_len, flags,
5513                            sdma_err_status_flags,
5514                            ARRAY_SIZE(sdma_err_status_flags));
5515 }
5516
5517 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5518 {
5519         return flag_string(buf, buf_len, flags,
5520                            egress_err_status_flags,
5521                            ARRAY_SIZE(egress_err_status_flags));
5522 }
5523
5524 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5525 {
5526         return flag_string(buf, buf_len, flags,
5527                            egress_err_info_flags,
5528                            ARRAY_SIZE(egress_err_info_flags));
5529 }
5530
5531 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5532 {
5533         return flag_string(buf, buf_len, flags,
5534                            send_err_status_flags,
5535                            ARRAY_SIZE(send_err_status_flags));
5536 }
5537
5538 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5539 {
5540         char buf[96];
5541         int i = 0;
5542
5543         /*
5544          * For most of these errors, there is nothing that can be done except
5545          * report or record them.
5546          */
5547         dd_dev_info(dd, "CCE Error: %s\n",
5548                     cce_err_status_string(buf, sizeof(buf), reg));
5549
5550         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5551             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5552                 /* this error requires a manual drop into SPC freeze mode */
5553                 /* then a fix up */
5554                 start_freeze_handling(dd->pport, FREEZE_SELF);
5555         }
5556
5557         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5558                 if (reg & (1ull << i)) {
5559                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5560                         /* maintain a counter over all cce_err_status errors */
5561                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5562                 }
5563         }
5564 }
5565
5566 /*
5567  * Check counters for receive errors that do not have an interrupt
5568  * associated with them.
5569  */
5570 #define RCVERR_CHECK_TIME 10
5571 static void update_rcverr_timer(struct timer_list *t)
5572 {
5573         struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5574         struct hfi1_pportdata *ppd = dd->pport;
5575         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5576
5577         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5578             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5579                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5580                 set_link_down_reason(
5581                 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5582                 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5583                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5584         }
5585         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5586
5587         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5588 }
5589
5590 static int init_rcverr(struct hfi1_devdata *dd)
5591 {
5592         timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5593         /* Assume the hardware counter has been reset */
5594         dd->rcv_ovfl_cnt = 0;
5595         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5596 }
5597
5598 static void free_rcverr(struct hfi1_devdata *dd)
5599 {
5600         if (dd->rcverr_timer.function)
5601                 del_timer_sync(&dd->rcverr_timer);
5602 }
5603
5604 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5605 {
5606         char buf[96];
5607         int i = 0;
5608
5609         dd_dev_info(dd, "Receive Error: %s\n",
5610                     rxe_err_status_string(buf, sizeof(buf), reg));
5611
5612         if (reg & ALL_RXE_FREEZE_ERR) {
5613                 int flags = 0;
5614
5615                 /*
5616                  * Freeze mode recovery is disabled for the errors
5617                  * in RXE_FREEZE_ABORT_MASK
5618                  */
5619                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5620                         flags = FREEZE_ABORT;
5621
5622                 start_freeze_handling(dd->pport, flags);
5623         }
5624
5625         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5626                 if (reg & (1ull << i))
5627                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5628         }
5629 }
5630
5631 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5632 {
5633         char buf[96];
5634         int i = 0;
5635
5636         dd_dev_info(dd, "Misc Error: %s",
5637                     misc_err_status_string(buf, sizeof(buf), reg));
5638         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5639                 if (reg & (1ull << i))
5640                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5641         }
5642 }
5643
5644 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5645 {
5646         char buf[96];
5647         int i = 0;
5648
5649         dd_dev_info(dd, "PIO Error: %s\n",
5650                     pio_err_status_string(buf, sizeof(buf), reg));
5651
5652         if (reg & ALL_PIO_FREEZE_ERR)
5653                 start_freeze_handling(dd->pport, 0);
5654
5655         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5656                 if (reg & (1ull << i))
5657                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5658         }
5659 }
5660
5661 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5662 {
5663         char buf[96];
5664         int i = 0;
5665
5666         dd_dev_info(dd, "SDMA Error: %s\n",
5667                     sdma_err_status_string(buf, sizeof(buf), reg));
5668
5669         if (reg & ALL_SDMA_FREEZE_ERR)
5670                 start_freeze_handling(dd->pport, 0);
5671
5672         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5673                 if (reg & (1ull << i))
5674                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5675         }
5676 }
5677
5678 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5679 {
5680         incr_cntr64(&ppd->port_xmit_discards);
5681 }
5682
5683 static void count_port_inactive(struct hfi1_devdata *dd)
5684 {
5685         __count_port_discards(dd->pport);
5686 }
5687
5688 /*
5689  * We have had a "disallowed packet" error during egress. Determine the
5690  * integrity check which failed, and update the relevant error counter, etc.
5691  *
5692  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5693  * bit of state per integrity check, and so we can miss the reason for an
5694  * egress error if more than one packet fails the same integrity check
5695  * since we clear the corresponding bit in SEND_EGRESS_ERR_INFO.
5696  */
5697 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5698                                         int vl)
5699 {
5700         struct hfi1_pportdata *ppd = dd->pport;
5701         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5702         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5703         char buf[96];
5704
5705         /* clear down all observed info as quickly as possible after read */
5706         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5707
5708         dd_dev_info(dd,
5709                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5710                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5711
5712         /* Eventually add other counters for each bit */
5713         if (info & PORT_DISCARD_EGRESS_ERRS) {
5714                 int weight, i;
5715
5716                 /*
5717                  * Count all applicable bits as individual errors and
5718                  * attribute them to the packet that triggered this handler.
5719                  * This may not be completely accurate due to limitations
5720                  * on the available hardware error information.  There is
5721                  * a single information register and any number of error
5722                  * packets may have occurred and contributed to it before
5723                  * this routine is called.  This means that:
5724                  * a) If multiple packets with the same error occur before
5725                  *    this routine is called, earlier packets are missed.
5726                  *    There is only a single bit for each error type.
5727                  * b) Errors may not be attributed to the correct VL.
5728                  *    The driver is attributing all bits in the info register
5729                  *    to the packet that triggered this call, but bits
5730                  *    could be an accumulation of different packets with
5731                  *    different VLs.
5732                  * c) A single error packet may have multiple counts attached
5733                  *    to it.  There is no way for the driver to know if
5734                  *    multiple bits set in the info register are due to a
5735                  *    single packet or multiple packets.  The driver assumes
5736                  *    multiple packets.
5737                  */
5738                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5739                 for (i = 0; i < weight; i++) {
5740                         __count_port_discards(ppd);
5741                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5742                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5743                         else if (vl == 15)
5744                                 incr_cntr64(&ppd->port_xmit_discards_vl
5745                                             [C_VL_15]);
5746                 }
5747         }
5748 }
5749
5750 /*
5751  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5752  * register. Does it represent a 'port inactive' error?
5753  */
5754 static inline int port_inactive_err(u64 posn)
5755 {
5756         return (posn >= SEES(TX_LINKDOWN) &&
5757                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5758 }
5759
5760 /*
5761  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5762  * register. Does it represent a 'disallowed packet' error?
5763  */
5764 static inline int disallowed_pkt_err(int posn)
5765 {
5766         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5767                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5768 }
5769
5770 /*
5771  * Input value is a bit position of one of the SDMA engine disallowed
5772  * packet errors.  Return which engine.  Use of this must be guarded by
5773  * disallowed_pkt_err().
5774  */
5775 static inline int disallowed_pkt_engine(int posn)
5776 {
5777         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5778 }
5779
5780 /*
5781  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5782  * be done.
5783  */
5784 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5785 {
5786         struct sdma_vl_map *m;
5787         int vl;
5788
5789         /* range check */
5790         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5791                 return -1;
5792
5793         rcu_read_lock();
5794         m = rcu_dereference(dd->sdma_map);
5795         vl = m->engine_to_vl[engine];
5796         rcu_read_unlock();
5797
5798         return vl;
5799 }
5800
5801 /*
5802  * Translate the send context (software index) into a VL.  Return -1 if the
5803  * translation cannot be done.
5804  */
5805 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5806 {
5807         struct send_context_info *sci;
5808         struct send_context *sc;
5809         int i;
5810
5811         sci = &dd->send_contexts[sw_index];
5812
5813         /* there is no information for user (PSM) and ack contexts */
5814         if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5815                 return -1;
5816
5817         sc = sci->sc;
5818         if (!sc)
5819                 return -1;
5820         if (dd->vld[15].sc == sc)
5821                 return 15;
5822         for (i = 0; i < num_vls; i++)
5823                 if (dd->vld[i].sc == sc)
5824                         return i;
5825
5826         return -1;
5827 }
5828
5829 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5830 {
5831         u64 reg_copy = reg, handled = 0;
5832         char buf[96];
5833         int i = 0;
5834
5835         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5836                 start_freeze_handling(dd->pport, 0);
5837         else if (is_ax(dd) &&
5838                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5839                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5840                 start_freeze_handling(dd->pport, 0);
5841
5842         while (reg_copy) {
5843                 int posn = fls64(reg_copy);
5844                 /* fls64() returns a 1-based offset; we want it zero based */
5845                 int shift = posn - 1;
5846                 u64 mask = 1ULL << shift;
5847
5848                 if (port_inactive_err(shift)) {
5849                         count_port_inactive(dd);
5850                         handled |= mask;
5851                 } else if (disallowed_pkt_err(shift)) {
5852                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5853
5854                         handle_send_egress_err_info(dd, vl);
5855                         handled |= mask;
5856                 }
5857                 reg_copy &= ~mask;
5858         }
5859
5860         reg &= ~handled;
5861
5862         if (reg)
5863                 dd_dev_info(dd, "Egress Error: %s\n",
5864                             egress_err_status_string(buf, sizeof(buf), reg));
5865
5866         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5867                 if (reg & (1ull << i))
5868                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5869         }
5870 }
5871
5872 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5873 {
5874         char buf[96];
5875         int i = 0;
5876
5877         dd_dev_info(dd, "Send Error: %s\n",
5878                     send_err_status_string(buf, sizeof(buf), reg));
5879
5880         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5881                 if (reg & (1ull << i))
5882                         incr_cntr64(&dd->send_err_status_cnt[i]);
5883         }
5884 }
5885
5886 /*
5887  * The maximum number of times the error clear down will loop before
5888  * blocking a repeating error.  This value is arbitrary.
5889  */
5890 #define MAX_CLEAR_COUNT 20
5891
5892 /*
5893  * Clear and handle an error register.  All error interrupts are funneled
5894  * through here to have a central location to correctly handle single-
5895  * or multi-shot errors.
5896  *
5897  * For non per-context registers, call this routine with a context value
5898  * of 0 so the per-context offset is zero.
5899  *
5900  * If the handler loops too many times, assume that something is wrong
5901  * and can't be fixed, so mask the error bits.
5902  */
5903 static void interrupt_clear_down(struct hfi1_devdata *dd,
5904                                  u32 context,
5905                                  const struct err_reg_info *eri)
5906 {
5907         u64 reg;
5908         u32 count;
5909
5910         /* read in a loop until no more errors are seen */
5911         count = 0;
5912         while (1) {
5913                 reg = read_kctxt_csr(dd, context, eri->status);
5914                 if (reg == 0)
5915                         break;
5916                 write_kctxt_csr(dd, context, eri->clear, reg);
5917                 if (likely(eri->handler))
5918                         eri->handler(dd, context, reg);
5919                 count++;
5920                 if (count > MAX_CLEAR_COUNT) {
5921                         u64 mask;
5922
5923                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5924                                    eri->desc, reg);
5925                         /*
5926                          * Read-modify-write so any other masked bits
5927                          * remain masked.
5928                          */
5929                         mask = read_kctxt_csr(dd, context, eri->mask);
5930                         mask &= ~reg;
5931                         write_kctxt_csr(dd, context, eri->mask, mask);
5932                         break;
5933                 }
5934         }
5935 }
5936
5937 /*
5938  * CCE block "misc" interrupt.  Source is < 16.
5939  */
5940 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5941 {
5942         const struct err_reg_info *eri = &misc_errs[source];
5943
5944         if (eri->handler) {
5945                 interrupt_clear_down(dd, 0, eri);
5946         } else {
5947                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5948                            source);
5949         }
5950 }
5951
5952 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5953 {
5954         return flag_string(buf, buf_len, flags,
5955                            sc_err_status_flags,
5956                            ARRAY_SIZE(sc_err_status_flags));
5957 }
5958
5959 /*
5960  * Send context error interrupt.  Source (hw_context) is < 160.
5961  *
5962  * All send context errors cause the send context to halt.  The normal
5963  * clear-down mechanism cannot be used because we cannot clear the
5964  * error bits until several other long-running items are done first.
5965  * This is OK because with the context halted, nothing else is going
5966  * to happen on it anyway.
5967  */
5968 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5969                                 unsigned int hw_context)
5970 {
5971         struct send_context_info *sci;
5972         struct send_context *sc;
5973         char flags[96];
5974         u64 status;
5975         u32 sw_index;
5976         int i = 0;
5977         unsigned long irq_flags;
5978
5979         sw_index = dd->hw_to_sw[hw_context];
5980         if (sw_index >= dd->num_send_contexts) {
5981                 dd_dev_err(dd,
5982                            "out of range sw index %u for send context %u\n",
5983                            sw_index, hw_context);
5984                 return;
5985         }
5986         sci = &dd->send_contexts[sw_index];
5987         spin_lock_irqsave(&dd->sc_lock, irq_flags);
5988         sc = sci->sc;
5989         if (!sc) {
5990                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5991                            sw_index, hw_context);
5992                 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5993                 return;
5994         }
5995
5996         /* tell the software that a halt has begun */
5997         sc_stop(sc, SCF_HALTED);
5998
5999         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
6000
6001         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
6002                     send_context_err_status_string(flags, sizeof(flags),
6003                                                    status));
6004
6005         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
6006                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
6007
6008         /*
6009          * Automatically restart halted kernel contexts out of interrupt
6010          * context.  User contexts must ask the driver to restart the context.
6011          */
6012         if (sc->type != SC_USER)
6013                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
6014         spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6015
6016         /*
6017          * Update the counters for the corresponding status bits.
6018          * Note that these particular counters are aggregated over all
6019          * 160 contexts.
6020          */
6021         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6022                 if (status & (1ull << i))
6023                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6024         }
6025 }
6026
6027 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6028                                 unsigned int source, u64 status)
6029 {
6030         struct sdma_engine *sde;
6031         int i = 0;
6032
6033         sde = &dd->per_sdma[source];
6034 #ifdef CONFIG_SDMA_VERBOSITY
6035         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6036                    slashstrip(__FILE__), __LINE__, __func__);
6037         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6038                    sde->this_idx, source, (unsigned long long)status);
6039 #endif
6040         sde->err_cnt++;
6041         sdma_engine_error(sde, status);
6042
6043         /*
6044          * Update the counters for the corresponding status bits.
6045          * Note that these particular counters are aggregated over
6046          * all 16 DMA engines.
6047          */
6048         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6049                 if (status & (1ull << i))
6050                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6051         }
6052 }
6053
6054 /*
6055  * CCE block SDMA error interrupt.  Source is < 16.
6056  */
6057 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6058 {
6059 #ifdef CONFIG_SDMA_VERBOSITY
6060         struct sdma_engine *sde = &dd->per_sdma[source];
6061
6062         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6063                    slashstrip(__FILE__), __LINE__, __func__);
6064         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6065                    source);
6066         sdma_dumpstate(sde);
6067 #endif
6068         interrupt_clear_down(dd, source, &sdma_eng_err);
6069 }
6070
6071 /*
6072  * CCE block "various" interrupt.  Source is < 8.
6073  */
6074 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6075 {
6076         const struct err_reg_info *eri = &various_err[source];
6077
6078         /*
6079          * TCritInt cannot go through interrupt_clear_down()
6080          * because it is not a second tier interrupt. The handler
6081          * should be called directly.
6082          */
6083         if (source == TCRIT_INT_SOURCE)
6084                 handle_temp_err(dd);
6085         else if (eri->handler)
6086                 interrupt_clear_down(dd, 0, eri);
6087         else
6088                 dd_dev_info(dd,
6089                             "%s: Unimplemented/reserved interrupt %d\n",
6090                             __func__, source);
6091 }
6092
6093 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6094 {
6095         /* src_ctx is always zero */
6096         struct hfi1_pportdata *ppd = dd->pport;
6097         unsigned long flags;
6098         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6099
6100         if (reg & QSFP_HFI0_MODPRST_N) {
6101                 if (!qsfp_mod_present(ppd)) {
6102                         dd_dev_info(dd, "%s: QSFP module removed\n",
6103                                     __func__);
6104
6105                         ppd->driver_link_ready = 0;
6106                         /*
6107                          * Cable removed, reset all our information about the
6108                          * cache and cable capabilities
6109                          */
6110
6111                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6112                         /*
6113                          * We don't set cache_refresh_required here as we expect
6114                          * an interrupt when a cable is inserted
6115                          */
6116                         ppd->qsfp_info.cache_valid = 0;
6117                         ppd->qsfp_info.reset_needed = 0;
6118                         ppd->qsfp_info.limiting_active = 0;
6119                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6120                                                flags);
6121                         /* Invert the ModPresent pin now to detect plug-in */
6122                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6123                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6124
6125                         if ((ppd->offline_disabled_reason >
6126                           HFI1_ODR_MASK(
6127                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6128                           (ppd->offline_disabled_reason ==
6129                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6130                                 ppd->offline_disabled_reason =
6131                                 HFI1_ODR_MASK(
6132                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6133
6134                         if (ppd->host_link_state == HLS_DN_POLL) {
6135                                 /*
6136                                  * The link is still in POLL. This means
6137                                  * that the normal link down processing
6138                                  * will not happen. We have to do it here
6139                                  * before turning the DC off.
6140                                  */
6141                                 queue_work(ppd->link_wq, &ppd->link_down_work);
6142                         }
6143                 } else {
6144                         dd_dev_info(dd, "%s: QSFP module inserted\n",
6145                                     __func__);
6146
6147                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6148                         ppd->qsfp_info.cache_valid = 0;
6149                         ppd->qsfp_info.cache_refresh_required = 1;
6150                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6151                                                flags);
6152
6153                         /*
6154                          * Stop inversion of ModPresent pin to detect
6155                          * removal of the cable
6156                          */
6157                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6158                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6159                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6160
6161                         ppd->offline_disabled_reason =
6162                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6163                 }
6164         }
6165
6166         if (reg & QSFP_HFI0_INT_N) {
6167                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6168                             __func__);
6169                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6170                 ppd->qsfp_info.check_interrupt_flags = 1;
6171                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6172         }
6173
6174         /* Schedule the QSFP work only if there is a cable attached. */
6175         if (qsfp_mod_present(ppd))
6176                 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6177 }
6178
6179 static int request_host_lcb_access(struct hfi1_devdata *dd)
6180 {
6181         int ret;
6182
6183         ret = do_8051_command(dd, HCMD_MISC,
6184                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6185                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6186         if (ret != HCMD_SUCCESS) {
6187                 dd_dev_err(dd, "%s: command failed with error %d\n",
6188                            __func__, ret);
6189         }
6190         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6191 }
6192
6193 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6194 {
6195         int ret;
6196
6197         ret = do_8051_command(dd, HCMD_MISC,
6198                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6199                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6200         if (ret != HCMD_SUCCESS) {
6201                 dd_dev_err(dd, "%s: command failed with error %d\n",
6202                            __func__, ret);
6203         }
6204         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6205 }
6206
6207 /*
6208  * Set the LCB selector - allow host access.  The DCC selector always
6209  * points to the host.
6210  */
6211 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6212 {
6213         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6214                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6215                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6216 }
6217
6218 /*
6219  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6220  * points to the host.
6221  */
6222 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6223 {
6224         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6225                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6226 }
6227
6228 /*
6229  * Acquire LCB access from the 8051.  If the host already has access,
6230  * just increment a counter.  Otherwise, inform the 8051 that the
6231  * host is taking access.
6232  *
6233  * Returns:
6234  *      0 on success
6235  *      -EBUSY if the 8051 has control and cannot be disturbed
6236  *      -errno if unable to acquire access from the 8051
6237  */
6238 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6239 {
6240         struct hfi1_pportdata *ppd = dd->pport;
6241         int ret = 0;
6242
6243         /*
6244          * Use the host link state lock so the operation of this routine
6245          * { link state check, selector change, count increment } can occur
6246          * as a unit against a link state change.  Otherwise there is a
6247          * race between the state change and the count increment.
6248          */
6249         if (sleep_ok) {
6250                 mutex_lock(&ppd->hls_lock);
6251         } else {
6252                 while (!mutex_trylock(&ppd->hls_lock))
6253                         udelay(1);
6254         }
6255
6256         /* this access is valid only when the link is up */
6257         if (ppd->host_link_state & HLS_DOWN) {
6258                 dd_dev_info(dd, "%s: link state %s not up\n",
6259                             __func__, link_state_name(ppd->host_link_state));
6260                 ret = -EBUSY;
6261                 goto done;
6262         }
6263
6264         if (dd->lcb_access_count == 0) {
6265                 ret = request_host_lcb_access(dd);
6266                 if (ret) {
6267                         dd_dev_err(dd,
6268                                    "%s: unable to acquire LCB access, err %d\n",
6269                                    __func__, ret);
6270                         goto done;
6271                 }
6272                 set_host_lcb_access(dd);
6273         }
6274         dd->lcb_access_count++;
6275 done:
6276         mutex_unlock(&ppd->hls_lock);
6277         return ret;
6278 }
6279
6280 /*
6281  * Release LCB access by decrementing the use count.  If the count is moving
6282  * from 1 to 0, inform 8051 that it has control back.
6283  *
6284  * Returns:
6285  *      0 on success
6286  *      -errno if unable to release access to the 8051
6287  */
6288 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6289 {
6290         int ret = 0;
6291
6292         /*
6293          * Use the host link state lock because the acquire needed it.
6294          * Here, we only need to keep { selector change, count decrement }
6295          * as a unit.
6296          */
6297         if (sleep_ok) {
6298                 mutex_lock(&dd->pport->hls_lock);
6299         } else {
6300                 while (!mutex_trylock(&dd->pport->hls_lock))
6301                         udelay(1);
6302         }
6303
6304         if (dd->lcb_access_count == 0) {
6305                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6306                            __func__);
6307                 goto done;
6308         }
6309
6310         if (dd->lcb_access_count == 1) {
6311                 set_8051_lcb_access(dd);
6312                 ret = request_8051_lcb_access(dd);
6313                 if (ret) {
6314                         dd_dev_err(dd,
6315                                    "%s: unable to release LCB access, err %d\n",
6316                                    __func__, ret);
6317                         /* restore host access if the grant didn't work */
6318                         set_host_lcb_access(dd);
6319                         goto done;
6320                 }
6321         }
6322         dd->lcb_access_count--;
6323 done:
6324         mutex_unlock(&dd->pport->hls_lock);
6325         return ret;
6326 }
6327
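/*
 * Illustrative usage sketch (not part of the original source): direct host
 * access to LCB CSRs is expected to be bracketed by the two routines above
 * so the 8051 is not disturbed while it owns the block, e.g.:
 *
 *      if (!acquire_lcb_access(dd, 1)) {
 *              u64 rtt = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *              ...
 *              release_lcb_access(dd, 1);
 *      }
 */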
6328 /*
6329  * Initialize LCB access variables and state.  Called during driver load,
6330  * after most of the initialization is finished.
6331  *
6332  * The DC default is LCB access on for the host.  The driver defaults to
6333  * leaving access to the 8051.  Assign access now - this constrains the call
6334  * to this routine to be after all LCB set-up is done.  In particular, after
6335  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6336  */
6337 static void init_lcb_access(struct hfi1_devdata *dd)
6338 {
6339         dd->lcb_access_count = 0;
6340 }
6341
6342 /*
6343  * Write a response back to an 8051 request.
6344  */
6345 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6346 {
6347         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6348                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6349                   (u64)return_code <<
6350                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6351                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6352 }
6353
6354 /*
6355  * Handle host requests from the 8051.
6356  */
6357 static void handle_8051_request(struct hfi1_pportdata *ppd)
6358 {
6359         struct hfi1_devdata *dd = ppd->dd;
6360         u64 reg;
6361         u16 data = 0;
6362         u8 type;
6363
6364         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6365         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6366                 return; /* no request */
6367
6368         /* zero out COMPLETED so the response is seen */
6369         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6370
6371         /* extract request details */
6372         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6373                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6374         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6375                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6376
6377         switch (type) {
6378         case HREQ_LOAD_CONFIG:
6379         case HREQ_SAVE_CONFIG:
6380         case HREQ_READ_CONFIG:
6381         case HREQ_SET_TX_EQ_ABS:
6382         case HREQ_SET_TX_EQ_REL:
6383         case HREQ_ENABLE:
6384                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6385                             type);
6386                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6387                 break;
6388         case HREQ_LCB_RESET:
6389                 /* Put the LCB, RX FPE and TX FPE into reset */
6390                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6391                 /* Make sure the write completed */
6392                 (void)read_csr(dd, DCC_CFG_RESET);
6393                 /* Hold the reset long enough to take effect */
6394                 udelay(1);
6395                 /* Take the LCB, RX FPE and TX FPE out of reset */
6396                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6397                 hreq_response(dd, HREQ_SUCCESS, 0);
6398
6399                 break;
6400         case HREQ_CONFIG_DONE:
6401                 hreq_response(dd, HREQ_SUCCESS, 0);
6402                 break;
6403
6404         case HREQ_INTERFACE_TEST:
6405                 hreq_response(dd, HREQ_SUCCESS, data);
6406                 break;
6407         default:
6408                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6409                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6410                 break;
6411         }
6412 }
6413
6414 /*
6415  * Set up the allocation unit value.
6416  */
6417 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6418 {
6419         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6420
6421         /* do not modify other values in the register */
6422         reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6423         reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6424         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6425 }
6426
6427 /*
6428  * Set up initial VL15 credits of the remote.  Assumes the rest of
6429  * the CM credit registers are zero from a previous global or credit reset.
6430  * Shared limit for VL15 will always be 0.
6431  */
6432 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6433 {
6434         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6435
6436         /* set initial values for total and shared credit limit */
6437         reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6438                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6439
6440         /*
6441          * Set total limit to be equal to VL15 credits.
6442          * Leave shared limit at 0.
6443          */
6444         reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6445         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6446
6447         write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6448                   << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6449 }
6450
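/*
 * Example (illustrative only): set_up_vl15(dd, 0x40) programs both the
 * global total credit limit and the VL15 dedicated limit to 0x40 credit
 * units, while the shared limit stays 0 as described above.
 */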
6451 /*
6452  * Zero all credit details from the previous connection and
6453  * reset the CM manager's internal counters.
6454  */
6455 void reset_link_credits(struct hfi1_devdata *dd)
6456 {
6457         int i;
6458
6459         /* remove all previous VL credit limits */
6460         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6461                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6462         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6463         write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6464         /* reset the CM block */
6465         pio_send_control(dd, PSC_CM_RESET);
6466         /* reset cached value */
6467         dd->vl15buf_cached = 0;
6468 }
6469
6470 /* convert a vCU to a CU */
6471 static u32 vcu_to_cu(u8 vcu)
6472 {
6473         return 1 << vcu;
6474 }
6475
6476 /* convert a CU to a vCU */
6477 static u8 cu_to_vcu(u32 cu)
6478 {
6479         return ilog2(cu);
6480 }
6481
6482 /* convert a vAU to an AU */
6483 static u32 vau_to_au(u8 vau)
6484 {
6485         return 8 * (1 << vau);
6486 }
6487
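/*
 * Worked examples of the encodings above (illustrative only):
 *      vcu_to_cu(2) == 4 credit units
 *      cu_to_vcu(4) == 2
 *      vau_to_au(0) == 8 bytes, vau_to_au(1) == 16 bytes
 * i.e. vAU/vCU are log2-style encodings of the allocation and credit units.
 */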
6488 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6489 {
6490         ppd->sm_trap_qp = 0x0;
6491         ppd->sa_qp = 0x1;
6492 }
6493
6494 /*
6495  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6496  */
6497 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6498 {
6499         u64 reg;
6500
6501         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6502         write_csr(dd, DC_LCB_CFG_RUN, 0);
6503         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6504         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6505                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6506         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6507         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6508         reg = read_csr(dd, DCC_CFG_RESET);
6509         write_csr(dd, DCC_CFG_RESET, reg |
6510                   DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6511         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6512         if (!abort) {
6513                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6514                 write_csr(dd, DCC_CFG_RESET, reg);
6515                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6516         }
6517 }
6518
6519 /*
6520  * This routine should be called after the link has been transitioned to
6521  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6522  * reset).
6523  *
6524  * The expectation is that the caller of this routine would have taken
6525  * care of properly transitioning the link into the correct state.
6526  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6527  *       before calling this function.
6528  */
6529 static void _dc_shutdown(struct hfi1_devdata *dd)
6530 {
6531         lockdep_assert_held(&dd->dc8051_lock);
6532
6533         if (dd->dc_shutdown)
6534                 return;
6535
6536         dd->dc_shutdown = 1;
6537         /* Shutdown the LCB */
6538         lcb_shutdown(dd, 1);
6539         /*
6540          * Going to OFFLINE would have caused the 8051 to put the
6541          * SerDes into reset already. Just need to shut down the 8051
6542          * itself.
6543          */
6544         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6545 }
6546
6547 static void dc_shutdown(struct hfi1_devdata *dd)
6548 {
6549         mutex_lock(&dd->dc8051_lock);
6550         _dc_shutdown(dd);
6551         mutex_unlock(&dd->dc8051_lock);
6552 }
6553
6554 /*
6555  * Calling this after the DC has been brought out of reset should not
6556  * do any damage.
6557  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6558  *       before calling this function.
6559  */
6560 static void _dc_start(struct hfi1_devdata *dd)
6561 {
6562         lockdep_assert_held(&dd->dc8051_lock);
6563
6564         if (!dd->dc_shutdown)
6565                 return;
6566
6567         /* Take the 8051 out of reset */
6568         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6569         /* Wait until 8051 is ready */
6570         if (wait_fm_ready(dd, TIMEOUT_8051_START))
6571                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6572                            __func__);
6573
6574         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6575         write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6576         /* lcb_shutdown() with abort=1 does not restore these */
6577         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6578         dd->dc_shutdown = 0;
6579 }
6580
6581 static void dc_start(struct hfi1_devdata *dd)
6582 {
6583         mutex_lock(&dd->dc8051_lock);
6584         _dc_start(dd);
6585         mutex_unlock(&dd->dc8051_lock);
6586 }
6587
6588 /*
6589  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6590  */
6591 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6592 {
6593         u64 rx_radr, tx_radr;
6594         u32 version;
6595
6596         if (dd->icode != ICODE_FPGA_EMULATION)
6597                 return;
6598
6599         /*
6600          * These LCB defaults on emulator _s are good, nothing to do here:
6601          *      LCB_CFG_TX_FIFOS_RADR
6602          *      LCB_CFG_RX_FIFOS_RADR
6603          *      LCB_CFG_LN_DCLK
6604          *      LCB_CFG_IGNORE_LOST_RCLK
6605          */
6606         if (is_emulator_s(dd))
6607                 return;
6608         /* else this is _p */
6609
6610         version = emulator_rev(dd);
6611         if (!is_ax(dd))
6612                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6613
6614         if (version <= 0x12) {
6615                 /* release 0x12 and below */
6616
6617                 /*
6618                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6619                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6620                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6621                  */
6622                 rx_radr =
6623                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6624                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6625                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6626                 /*
6627                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6628                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6629                  */
6630                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6631         } else if (version <= 0x18) {
6632                 /* release 0x13 up to 0x18 */
6633                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6634                 rx_radr =
6635                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6636                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6637                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6638                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6639         } else if (version == 0x19) {
6640                 /* release 0x19 */
6641                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6642                 rx_radr =
6643                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6644                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6645                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6646                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6647         } else if (version == 0x1a) {
6648                 /* release 0x1a */
6649                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6650                 rx_radr =
6651                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6652                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6653                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6654                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6655                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6656         } else {
6657                 /* release 0x1b and higher */
6658                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6659                 rx_radr =
6660                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6661                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6662                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6663                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6664         }
6665
6666         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6667         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6668         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6669                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6670         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6671 }
6672
6673 /*
6674  * Handle a SMA idle message
6675  *
6676  * This is a work-queue function outside of the interrupt.
6677  */
6678 void handle_sma_message(struct work_struct *work)
6679 {
6680         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6681                                                         sma_message_work);
6682         struct hfi1_devdata *dd = ppd->dd;
6683         u64 msg;
6684         int ret;
6685
6686         /*
6687          * msg is bytes 1-4 of the 40-bit idle message - the command code
6688          * is stripped off
6689          */
6690         ret = read_idle_sma(dd, &msg);
6691         if (ret)
6692                 return;
6693         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6694         /*
6695          * React to the SMA message.  Byte[1] (0 for us) is the command.
6696          */
6697         switch (msg & 0xff) {
6698         case SMA_IDLE_ARM:
6699                 /*
6700                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6701                  * State Transitions
6702                  *
6703                  * Only expected in INIT or ARMED, discard otherwise.
6704                  */
6705                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6706                         ppd->neighbor_normal = 1;
6707                 break;
6708         case SMA_IDLE_ACTIVE:
6709                 /*
6710                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6711                  * State Transitions
6712                  *
6713                  * Can activate the node.  Discard otherwise.
6714                  */
6715                 if (ppd->host_link_state == HLS_UP_ARMED &&
6716                     ppd->is_active_optimize_enabled) {
6717                         ppd->neighbor_normal = 1;
6718                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6719                         if (ret)
6720                                 dd_dev_err(
6721                                         dd,
6722                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6723                                         __func__);
6724                 }
6725                 break;
6726         default:
6727                 dd_dev_err(dd,
6728                            "%s: received unexpected SMA idle message 0x%llx\n",
6729                            __func__, msg);
6730                 break;
6731         }
6732 }
6733
6734 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6735 {
6736         u64 rcvctrl;
6737         unsigned long flags;
6738
6739         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6740         rcvctrl = read_csr(dd, RCV_CTRL);
6741         rcvctrl |= add;
6742         rcvctrl &= ~clear;
6743         write_csr(dd, RCV_CTRL, rcvctrl);
6744         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6745 }
6746
6747 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6748 {
6749         adjust_rcvctrl(dd, add, 0);
6750 }
6751
6752 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6753 {
6754         adjust_rcvctrl(dd, 0, clear);
6755 }
6756
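/*
 * Example (illustrative): the freeze path below disables the port with
 * clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) and re-enables it via
 * add_rcvctrl() once kernel contexts are unfrozen; both go through the
 * read-modify-write above under rcvctrl_lock.
 */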
6757 /*
6758  * Called from all interrupt handlers to start handling an SPC freeze.
6759  */
6760 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6761 {
6762         struct hfi1_devdata *dd = ppd->dd;
6763         struct send_context *sc;
6764         int i;
6765         int sc_flags;
6766
6767         if (flags & FREEZE_SELF)
6768                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6769
6770         /* enter frozen mode */
6771         dd->flags |= HFI1_FROZEN;
6772
6773         /* notify all SDMA engines that they are going into a freeze */
6774         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6775
6776         sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6777                                               SCF_LINK_DOWN : 0);
6778         /* do halt pre-handling on all enabled send contexts */
6779         for (i = 0; i < dd->num_send_contexts; i++) {
6780                 sc = dd->send_contexts[i].sc;
6781                 if (sc && (sc->flags & SCF_ENABLED))
6782                         sc_stop(sc, sc_flags);
6783         }
6784
6785         /* Send contexts are frozen. Notify user space */
6786         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6787
6788         if (flags & FREEZE_ABORT) {
6789                 dd_dev_err(dd,
6790                            "Aborted freeze recovery. Please REBOOT system\n");
6791                 return;
6792         }
6793         /* queue non-interrupt handler */
6794         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6795 }
6796
6797 /*
6798  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6799  * depending on the "freeze" parameter.
6800  *
6801  * No need to return an error if it times out; our only option
6802  * is to proceed anyway.
6803  */
6804 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6805 {
6806         unsigned long timeout;
6807         u64 reg;
6808
6809         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6810         while (1) {
6811                 reg = read_csr(dd, CCE_STATUS);
6812                 if (freeze) {
6813                         /* waiting until all indicators are set */
6814                         if ((reg & ALL_FROZE) == ALL_FROZE)
6815                                 return; /* all done */
6816                 } else {
6817                         /* waiting until all indicators are clear */
6818                         if ((reg & ALL_FROZE) == 0)
6819                                 return; /* all done */
6820                 }
6821
6822                 if (time_after(jiffies, timeout)) {
6823                         dd_dev_err(dd,
6824                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6825                                    freeze ? "" : "un", reg & ALL_FROZE,
6826                                    freeze ? ALL_FROZE : 0ull);
6827                         return;
6828                 }
6829                 usleep_range(80, 120);
6830         }
6831 }
6832
6833 /*
6834  * Do all freeze handling for the RXE block.
6835  */
6836 static void rxe_freeze(struct hfi1_devdata *dd)
6837 {
6838         int i;
6839         struct hfi1_ctxtdata *rcd;
6840
6841         /* disable port */
6842         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6843
6844         /* disable all receive contexts */
6845         for (i = 0; i < dd->num_rcv_contexts; i++) {
6846                 rcd = hfi1_rcd_get_by_index(dd, i);
6847                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6848                 hfi1_rcd_put(rcd);
6849         }
6850 }
6851
6852 /*
6853  * Unfreeze handling for the RXE block - kernel contexts only.
6854  * This will also enable the port.  User contexts will do unfreeze
6855  * handling on a per-context basis as they call into the driver.
6856  *
6857  */
6858 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6859 {
6860         u32 rcvmask;
6861         u16 i;
6862         struct hfi1_ctxtdata *rcd;
6863
6864         /* enable all kernel contexts */
6865         for (i = 0; i < dd->num_rcv_contexts; i++) {
6866                 rcd = hfi1_rcd_get_by_index(dd, i);
6867
6868                 /* Ensure all non-user contexts (including vnic) are enabled */
6869                 if (!rcd ||
6870                     (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6871                         hfi1_rcd_put(rcd);
6872                         continue;
6873                 }
6874                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6875                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6876                 rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ?
6877                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6878                 hfi1_rcvctrl(dd, rcvmask, rcd);
6879                 hfi1_rcd_put(rcd);
6880         }
6881
6882         /* enable port */
6883         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6884 }
6885
6886 /*
6887  * Non-interrupt SPC freeze handling.
6888  *
6889  * This is a work-queue function outside of the triggering interrupt.
6890  */
6891 void handle_freeze(struct work_struct *work)
6892 {
6893         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6894                                                                 freeze_work);
6895         struct hfi1_devdata *dd = ppd->dd;
6896
6897         /* wait for freeze indicators on all affected blocks */
6898         wait_for_freeze_status(dd, 1);
6899
6900         /* SPC is now frozen */
6901
6902         /* do send PIO freeze steps */
6903         pio_freeze(dd);
6904
6905         /* do send DMA freeze steps */
6906         sdma_freeze(dd);
6907
6908         /* do send egress freeze steps - nothing to do */
6909
6910         /* do receive freeze steps */
6911         rxe_freeze(dd);
6912
6913         /*
6914          * Unfreeze the hardware - clear the freeze, wait for each
6915          * block's frozen bit to clear, then clear the frozen flag.
6916          */
6917         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6918         wait_for_freeze_status(dd, 0);
6919
6920         if (is_ax(dd)) {
6921                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6922                 wait_for_freeze_status(dd, 1);
6923                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6924                 wait_for_freeze_status(dd, 0);
6925         }
6926
6927         /* do send PIO unfreeze steps for kernel contexts */
6928         pio_kernel_unfreeze(dd);
6929
6930         /* do send DMA unfreeze steps */
6931         sdma_unfreeze(dd);
6932
6933         /* do send egress unfreeze steps - nothing to do */
6934
6935         /* do receive unfreeze steps for kernel contexts */
6936         rxe_kernel_unfreeze(dd);
6937
6938         /*
6939          * The unfreeze procedure touches global device registers when
6940          * it disables and re-enables RXE. Mark the device unfrozen
6941          * after all that is done so other parts of the driver waiting
6942          * for the device to unfreeze don't do things out of order.
6943          *
6944          * The above implies that the meaning of HFI1_FROZEN flag is
6945          * "Device has gone into freeze mode and freeze mode handling
6946          * is still in progress."
6947          *
6948          * The flag will be removed when freeze mode processing has
6949          * completed.
6950          */
6951         dd->flags &= ~HFI1_FROZEN;
6952         wake_up(&dd->event_queue);
6953
6954         /* no longer frozen */
6955 }
6956
6957 /**
6958  * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6959  * counters.
6960  * @ppd: info of physical Hfi port
6961  * @link_width: new link width after link up or downgrade
6962  *
6963  * Update the PortXmitWait and PortVlXmitWait counters after
6964  * a link up or downgrade event to reflect a link width change.
6965  */
6966 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6967 {
6968         int i;
6969         u16 tx_width;
6970         u16 link_speed;
6971
6972         tx_width = tx_link_width(link_width);
6973         link_speed = get_link_speed(ppd->link_speed_active);
6974
6975         /*
6976          * There are C_VL_COUNT number of PortVLXmitWait counters.
6977          * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
6978          */
6979         for (i = 0; i < C_VL_COUNT + 1; i++)
6980                 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6981 }
6982
6983 /*
6984  * Handle a link up interrupt from the 8051.
6985  *
6986  * This is a work-queue function outside of the interrupt.
6987  */
6988 void handle_link_up(struct work_struct *work)
6989 {
6990         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6991                                                   link_up_work);
6992         struct hfi1_devdata *dd = ppd->dd;
6993
6994         set_link_state(ppd, HLS_UP_INIT);
6995
6996         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6997         read_ltp_rtt(dd);
6998         /*
6999          * OPA specifies that certain counters are cleared on a transition
7000          * to link up, so do that.
7001          */
7002         clear_linkup_counters(dd);
7003         /*
7004          * And (re)set link up default values.
7005          */
7006         set_linkup_defaults(ppd);
7007
7008         /*
7009          * Set VL15 credits. Use cached value from verify cap interrupt.
7010          * In case of quick linkup or simulator, vl15 value will be set by
7011          * handle_linkup_change. VerifyCap interrupt handler will not be
7012          * called in those scenarios.
7013          */
7014         if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
7015                 set_up_vl15(dd, dd->vl15buf_cached);
7016
7017         /* enforce link speed enabled */
7018         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7019                 /* oops - current speed is not enabled, bounce */
7020                 dd_dev_err(dd,
7021                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7022                            ppd->link_speed_active, ppd->link_speed_enabled);
7023                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7024                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
7025                 set_link_state(ppd, HLS_DN_OFFLINE);
7026                 start_link(ppd);
7027         }
7028 }
7029
7030 /*
7031  * Several pieces of LNI information were cached for SMA in ppd.
7032  * Reset these on link down
7033  */
7034 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7035 {
7036         ppd->neighbor_guid = 0;
7037         ppd->neighbor_port_number = 0;
7038         ppd->neighbor_type = 0;
7039         ppd->neighbor_fm_security = 0;
7040 }
7041
7042 static const char * const link_down_reason_strs[] = {
7043         [OPA_LINKDOWN_REASON_NONE] = "None",
7044         [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7045         [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7046         [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7047         [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7048         [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7049         [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7050         [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7051         [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7052         [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7053         [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7054         [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7055         [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7056         [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7057         [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7058         [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7059         [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7060         [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7061         [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7062         [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7063         [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7064         [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7065         [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7066         [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7067         [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7068         [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7069         [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7070         [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7071         [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7072         [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7073         [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7074         [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7075         [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7076                                         "Excessive buffer overrun",
7077         [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7078         [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7079         [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7080         [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7081         [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7082         [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7083         [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7084         [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7085                                         "Local media not installed",
7086         [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7087         [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7088         [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7089                                         "End to end not installed",
7090         [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7091         [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7092         [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7093         [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7094         [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7095         [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7096 };
7097
7098 /* return the neighbor link down reason string */
7099 static const char *link_down_reason_str(u8 reason)
7100 {
7101         const char *str = NULL;
7102
7103         if (reason < ARRAY_SIZE(link_down_reason_strs))
7104                 str = link_down_reason_strs[reason];
7105         if (!str)
7106                 str = "(invalid)";
7107
7108         return str;
7109 }
7110
7111 /*
7112  * Handle a link down interrupt from the 8051.
7113  *
7114  * This is a work-queue function outside of the interrupt.
7115  */
7116 void handle_link_down(struct work_struct *work)
7117 {
7118         u8 lcl_reason, neigh_reason = 0;
7119         u8 link_down_reason;
7120         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7121                                                   link_down_work);
7122         int was_up;
7123         static const char ldr_str[] = "Link down reason: ";
7124
7125         if ((ppd->host_link_state &
7126              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7127              ppd->port_type == PORT_TYPE_FIXED)
7128                 ppd->offline_disabled_reason =
7129                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7130
7131         /* Go offline first, then deal with reading/writing through 8051 */
7132         was_up = !!(ppd->host_link_state & HLS_UP);
7133         set_link_state(ppd, HLS_DN_OFFLINE);
7134         xchg(&ppd->is_link_down_queued, 0);
7135
7136         if (was_up) {
7137                 lcl_reason = 0;
7138                 /* link down reason is only valid if the link was up */
7139                 read_link_down_reason(ppd->dd, &link_down_reason);
7140                 switch (link_down_reason) {
7141                 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7142                         /* the link went down, no idle message reason */
7143                         dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7144                                     ldr_str);
7145                         break;
7146                 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7147                         /*
7148                          * The neighbor reason is only valid if an idle message
7149                          * was received for it.
7150                          */
7151                         read_planned_down_reason_code(ppd->dd, &neigh_reason);
7152                         dd_dev_info(ppd->dd,
7153                                     "%sNeighbor link down message %d, %s\n",
7154                                     ldr_str, neigh_reason,
7155                                     link_down_reason_str(neigh_reason));
7156                         break;
7157                 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7158                         dd_dev_info(ppd->dd,
7159                                     "%sHost requested link to go offline\n",
7160                                     ldr_str);
7161                         break;
7162                 default:
7163                         dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7164                                     ldr_str, link_down_reason);
7165                         break;
7166                 }
7167
7168                 /*
7169                  * If no reason, assume peer-initiated but missed
7170                  * LinkGoingDown idle flits.
7171                  */
7172                 if (neigh_reason == 0)
7173                         lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7174         } else {
7175                 /* went down while polling or going up */
7176                 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7177         }
7178
7179         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7180
7181         /* inform the SMA when the link transitions from up to down */
7182         if (was_up && ppd->local_link_down_reason.sma == 0 &&
7183             ppd->neigh_link_down_reason.sma == 0) {
7184                 ppd->local_link_down_reason.sma =
7185                                         ppd->local_link_down_reason.latest;
7186                 ppd->neigh_link_down_reason.sma =
7187                                         ppd->neigh_link_down_reason.latest;
7188         }
7189
7190         reset_neighbor_info(ppd);
7191
7192         /* disable the port */
7193         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7194
7195         /*
7196          * If there is no cable attached, turn the DC off. Otherwise,
7197          * start the link bring up.
7198          */
7199         if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7200                 dc_shutdown(ppd->dd);
7201         else
7202                 start_link(ppd);
7203 }
7204
7205 void handle_link_bounce(struct work_struct *work)
7206 {
7207         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7208                                                         link_bounce_work);
7209
7210         /*
7211          * Only do something if the link is currently up.
7212          */
7213         if (ppd->host_link_state & HLS_UP) {
7214                 set_link_state(ppd, HLS_DN_OFFLINE);
7215                 start_link(ppd);
7216         } else {
7217                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7218                             __func__, link_state_name(ppd->host_link_state));
7219         }
7220 }
7221
7222 /*
7223  * Mask conversion: Capability exchange to Port LTP.  The capability
7224  * exchange has an implicit 16b CRC that is mandatory.
7225  */
7226 static int cap_to_port_ltp(int cap)
7227 {
7228         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7229
7230         if (cap & CAP_CRC_14B)
7231                 port_ltp |= PORT_LTP_CRC_MODE_14;
7232         if (cap & CAP_CRC_48B)
7233                 port_ltp |= PORT_LTP_CRC_MODE_48;
7234         if (cap & CAP_CRC_12B_16B_PER_LANE)
7235                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7236
7237         return port_ltp;
7238 }
7239
7240 /*
7241  * Convert an OPA Port LTP mask to capability mask
7242  */
7243 int port_ltp_to_cap(int port_ltp)
7244 {
7245         int cap_mask = 0;
7246
7247         if (port_ltp & PORT_LTP_CRC_MODE_14)
7248                 cap_mask |= CAP_CRC_14B;
7249         if (port_ltp & PORT_LTP_CRC_MODE_48)
7250                 cap_mask |= CAP_CRC_48B;
7251         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7252                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7253
7254         return cap_mask;
7255 }
7256
7257 /*
7258  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7259  */
7260 static int lcb_to_port_ltp(int lcb_crc)
7261 {
7262         int port_ltp = 0;
7263
7264         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7265                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7266         else if (lcb_crc == LCB_CRC_48B)
7267                 port_ltp = PORT_LTP_CRC_MODE_48;
7268         else if (lcb_crc == LCB_CRC_14B)
7269                 port_ltp = PORT_LTP_CRC_MODE_14;
7270         else
7271                 port_ltp = PORT_LTP_CRC_MODE_16;
7272
7273         return port_ltp;
7274 }
7275
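/*
 * Illustrative round trip (not in the original source): a capability mask of
 * (CAP_CRC_14B | CAP_CRC_48B) converts via cap_to_port_ltp() to
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48, and
 * port_ltp_to_cap() recovers the two optional CRC capabilities (the
 * mandatory 16b mode has no capability bit).
 */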
7276 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7277 {
7278         if (ppd->pkeys[2] != 0) {
7279                 ppd->pkeys[2] = 0;
7280                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7281                 hfi1_event_pkey_change(ppd->dd, ppd->port);
7282         }
7283 }
7284
7285 /*
7286  * Convert the given link width to the OPA link width bitmask.
7287  */
7288 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7289 {
7290         switch (width) {
7291         case 0:
7292                 /*
7293                  * Simulator and quick linkup do not set the width.
7294                  * Just set it to 4x without complaint.
7295                  */
7296                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7297                         return OPA_LINK_WIDTH_4X;
7298                 return 0; /* no lanes up */
7299         case 1: return OPA_LINK_WIDTH_1X;
7300         case 2: return OPA_LINK_WIDTH_2X;
7301         case 3: return OPA_LINK_WIDTH_3X;
7302         default:
7303                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7304                             __func__, width);
7305                 /* fall through */
7306         case 4: return OPA_LINK_WIDTH_4X;
7307         }
7308 }
7309
7310 /*
7311  * Do a population count on the bottom nibble.
7312  */
7313 static const u8 bit_counts[16] = {
7314         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7315 };
7316
7317 static inline u8 nibble_to_count(u8 nibble)
7318 {
7319         return bit_counts[nibble & 0xf];
7320 }
7321
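/*
 * For example (illustrative only), an enable_lane mask of 0x9 (lanes 0 and 3
 * active) yields nibble_to_count(0x9) == 2, which link_width_to_bits() then
 * maps to the corresponding OPA link width.
 */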
7322 /*
7323  * Read the active lane information from the 8051 registers and return
7324  * their widths.
7325  *
7326  * Active lane information is found in these 8051 registers:
7327  *      enable_lane_tx
7328  *      enable_lane_rx
7329  */
7330 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7331                             u16 *rx_width)
7332 {
7333         u16 tx, rx;
7334         u8 enable_lane_rx;
7335         u8 enable_lane_tx;
7336         u8 tx_polarity_inversion;
7337         u8 rx_polarity_inversion;
7338         u8 max_rate;
7339
7340         /* read the active lanes */
7341         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7342                          &rx_polarity_inversion, &max_rate);
7343         read_local_lni(dd, &enable_lane_rx);
7344
7345         /* convert to counts */
7346         tx = nibble_to_count(enable_lane_tx);
7347         rx = nibble_to_count(enable_lane_rx);
7348
7349         /*
7350          * Set link_speed_active here, overriding what was set in
7351          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7352          * set the max_rate field in handle_verify_cap until v0.19.
7353          */
7354         if ((dd->icode == ICODE_RTL_SILICON) &&
7355             (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7356                 /* max_rate: 0 = 12.5G, 1 = 25G */
7357                 switch (max_rate) {
7358                 case 0:
7359                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7360                         break;
7361                 default:
7362                         dd_dev_err(dd,
7363                                    "%s: unexpected max rate %d, using 25Gb\n",
7364                                    __func__, (int)max_rate);
7365                         /* fall through */
7366                 case 1:
7367                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7368                         break;
7369                 }
7370         }
7371
7372         dd_dev_info(dd,
7373                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7374                     enable_lane_tx, tx, enable_lane_rx, rx);
7375         *tx_width = link_width_to_bits(dd, tx);
7376         *rx_width = link_width_to_bits(dd, rx);
7377 }
7378
7379 /*
7380  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7381  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7382  * after link up.  I.e. look elsewhere for downgrade information.
7383  *
7384  * Bits are:
7385  *      + bits [7:4] contain the number of active transmitters
7386  *      + bits [3:0] contain the number of active receivers
7387  * These are numbers 1 through 4 and can be different values if the
7388  * link is asymmetric.
7389  *
7390  * verify_cap_local_fm_link_width[0] retains its original value.
7391  */
7392 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7393                               u16 *rx_width)
7394 {
7395         u16 widths, tx, rx;
7396         u8 misc_bits, local_flags;
7397         u16 active_tx, active_rx;
7398
7399         read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7400         tx = widths >> 12;
7401         rx = (widths >> 8) & 0xf;
7402
7403         *tx_width = link_width_to_bits(dd, tx);
7404         *rx_width = link_width_to_bits(dd, rx);
7405
7406         /* print the active widths */
7407         get_link_widths(dd, &active_tx, &active_rx);
7408 }
7409
7410 /*
7411  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7412  * hardware information when the link first comes up.
7413  *
7414  * The link width is not available until after VerifyCap.AllFramesReceived
7415  * (the trigger for handle_verify_cap), so this is outside that routine
7416  * and should be called when the 8051 signals linkup.
7417  */
7418 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7419 {
7420         u16 tx_width, rx_width;
7421
7422         /* get end-of-LNI link widths */
7423         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7424
7425         /* use tx_width as the link is supposed to be symmetric on link up */
7426         ppd->link_width_active = tx_width;
7427         /* link width downgrade active (LWD.A) starts out matching LW.A */
7428         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7429         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7430         /* per OPA spec, on link up LWD.E resets to LWD.S */
7431         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7432         /* cache the active egress rate (units of 10^6 bits/sec) */
7433         ppd->current_egress_rate = active_egress_rate(ppd);
7434 }
7435
7436 /*
7437  * Handle a verify capabilities interrupt from the 8051.
7438  *
7439  * This is a work-queue function outside of the interrupt.
7440  */
7441 void handle_verify_cap(struct work_struct *work)
7442 {
7443         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7444                                                                 link_vc_work);
7445         struct hfi1_devdata *dd = ppd->dd;
7446         u64 reg;
7447         u8 power_management;
7448         u8 continuous;
7449         u8 vcu;
7450         u8 vau;
7451         u8 z;
7452         u16 vl15buf;
7453         u16 link_widths;
7454         u16 crc_mask;
7455         u16 crc_val;
7456         u16 device_id;
7457         u16 active_tx, active_rx;
7458         u8 partner_supported_crc;
7459         u8 remote_tx_rate;
7460         u8 device_rev;
7461
7462         set_link_state(ppd, HLS_VERIFY_CAP);
7463
7464         lcb_shutdown(dd, 0);
7465         adjust_lcb_for_fpga_serdes(dd);
7466
7467         read_vc_remote_phy(dd, &power_management, &continuous);
7468         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7469                               &partner_supported_crc);
7470         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7471         read_remote_device_id(dd, &device_id, &device_rev);
7472
7473         /* print the active widths */
7474         get_link_widths(dd, &active_tx, &active_rx);
7475         dd_dev_info(dd,
7476                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7477                     (int)power_management, (int)continuous);
7478         dd_dev_info(dd,
7479                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7480                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7481                     (int)partner_supported_crc);
7482         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7483                     (u32)remote_tx_rate, (u32)link_widths);
7484         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7485                     (u32)device_id, (u32)device_rev);
7486         /*
7487          * The peer vAU value just read is the peer receiver value.  HFI does
7488          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7489          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7490          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7491          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7492          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7493          * subject to the Z value exception.
7494          */
7495         if (vau == 0)
7496                 vau = 1;
7497         set_up_vau(dd, vau);
7498
7499         /*
7500          * Set VL15 credits to 0 in global credit register. Cache remote VL15
7501          * credits value and wait for the link-up interrupt to set it.
7502          */
7503         set_up_vl15(dd, 0);
7504         dd->vl15buf_cached = vl15buf;
7505
7506         /* set up the LCB CRC mode */
7507         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7508
7509         /* order is important: use the lowest bit in common */
7510         if (crc_mask & CAP_CRC_14B)
7511                 crc_val = LCB_CRC_14B;
7512         else if (crc_mask & CAP_CRC_48B)
7513                 crc_val = LCB_CRC_48B;
7514         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7515                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7516         else
7517                 crc_val = LCB_CRC_16B;
7518
7519         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7520         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7521                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7522
7523         /* set (14b only) or clear sideband credit */
7524         reg = read_csr(dd, SEND_CM_CTRL);
7525         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7526                 write_csr(dd, SEND_CM_CTRL,
7527                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7528         } else {
7529                 write_csr(dd, SEND_CM_CTRL,
7530                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7531         }
7532
7533         ppd->link_speed_active = 0;     /* invalid value */
7534         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7535                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7536                 switch (remote_tx_rate) {
7537                 case 0:
7538                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7539                         break;
7540                 case 1:
7541                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7542                         break;
7543                 }
7544         } else {
7545                 /* actual rate is highest bit of the ANDed rates */
7546                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7547
7548                 if (rate & 2)
7549                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7550                 else if (rate & 1)
7551                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7552         }
7553         if (ppd->link_speed_active == 0) {
7554                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7555                            __func__, (int)remote_tx_rate);
7556                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7557         }
7558
7559         /*
7560          * Cache the values of the supported, enabled, and active
7561          * LTP CRC modes to return in 'portinfo' queries. But the bit
7562          * flags that are returned in the portinfo query differ from
7563          * what's in the link_crc_mask, crc_sizes, and crc_val
7564          * variables. Convert these here.
7565          */
7566         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7567                 /* supported crc modes */
7568         ppd->port_ltp_crc_mode |=
7569                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7570                 /* enabled crc modes */
7571         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7572                 /* active crc mode */
7573
7574         /* set up the remote credit return table */
7575         assign_remote_cm_au_table(dd, vcu);
7576
7577         /*
7578          * The LCB is reset on entry to handle_verify_cap(), so this must
7579          * be applied on every link up.
7580          *
7581          * Adjust LCB error kill enable to kill the link if
7582          * these RBUF errors are seen:
7583          *      REPLAY_BUF_MBE_SMASK
7584          *      FLIT_INPUT_BUF_MBE_SMASK
7585          */
7586         if (is_ax(dd)) {                        /* fixed in B0 */
7587                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7588                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7589                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7590                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7591         }
7592
7593         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7594         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7595
7596         /* give 8051 access to the LCB CSRs */
7597         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7598         set_8051_lcb_access(dd);
7599
7600         /* tell the 8051 to go to LinkUp */
7601         set_link_state(ppd, HLS_GOING_UP);
7602 }
7603
7604 /**
7605  * apply_link_downgrade_policy - Apply the link width downgrade enabled
7606  * policy against the current active link widths.
7607  * @ppd: info of physical Hfi port
7608  * @refresh_widths: True indicates link downgrade event
7609  * @return: True indicates a successful link downgrade. False indicates
7610  *          link downgrade event failed and the link will bounce back to
7611  *          default link width.
7612  *
7613  * Called when the enabled policy changes or the active link widths
7614  * change.
7615  * Refresh_widths indicates that a link downgrade occurred.  The
7616  * link_downgraded variable is initialized from refresh_widths and
7617  * reflects the success/failure of the policy application.
7618  */
7619 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7620                                  bool refresh_widths)
7621 {
7622         int do_bounce = 0;
7623         int tries;
7624         u16 lwde;
7625         u16 tx, rx;
7626         bool link_downgraded = refresh_widths;
7627
7628         /* use the hls lock to avoid a race with actual link up */
7629         tries = 0;
7630 retry:
7631         mutex_lock(&ppd->hls_lock);
7632         /* only apply if the link is up */
7633         if (ppd->host_link_state & HLS_DOWN) {
7634                         /* still going up, wait and retry */
7635                 if (ppd->host_link_state & HLS_GOING_UP) {
7636                         if (++tries < 1000) {
7637                                 mutex_unlock(&ppd->hls_lock);
7638                                 usleep_range(100, 120); /* arbitrary */
7639                                 goto retry;
7640                         }
7641                         dd_dev_err(ppd->dd,
7642                                    "%s: giving up waiting for link state change\n",
7643                                    __func__);
7644                 }
7645                 goto done;
7646         }
7647
7648         lwde = ppd->link_width_downgrade_enabled;
7649
7650         if (refresh_widths) {
7651                 get_link_widths(ppd->dd, &tx, &rx);
7652                 ppd->link_width_downgrade_tx_active = tx;
7653                 ppd->link_width_downgrade_rx_active = rx;
7654         }
7655
7656         if (ppd->link_width_downgrade_tx_active == 0 ||
7657             ppd->link_width_downgrade_rx_active == 0) {
7658                 /* the 8051 reported a dead link as a downgrade */
7659                 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7660                 link_downgraded = false;
7661         } else if (lwde == 0) {
7662                 /* downgrade is disabled */
7663
7664                 /* bounce if not at starting active width */
7665                 if ((ppd->link_width_active !=
7666                      ppd->link_width_downgrade_tx_active) ||
7667                     (ppd->link_width_active !=
7668                      ppd->link_width_downgrade_rx_active)) {
7669                         dd_dev_err(ppd->dd,
7670                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7671                         dd_dev_err(ppd->dd,
7672                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7673                                    ppd->link_width_active,
7674                                    ppd->link_width_downgrade_tx_active,
7675                                    ppd->link_width_downgrade_rx_active);
7676                         do_bounce = 1;
7677                         link_downgraded = false;
7678                 }
7679         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7680                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7681                 /* Tx or Rx is outside the enabled policy */
7682                 dd_dev_err(ppd->dd,
7683                            "Link is outside of downgrade allowed, downing link\n");
7684                 dd_dev_err(ppd->dd,
7685                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7686                            lwde, ppd->link_width_downgrade_tx_active,
7687                            ppd->link_width_downgrade_rx_active);
7688                 do_bounce = 1;
7689                 link_downgraded = false;
7690         }
7691
7692 done:
7693         mutex_unlock(&ppd->hls_lock);
7694
7695         if (do_bounce) {
7696                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7697                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7698                 set_link_state(ppd, HLS_DN_OFFLINE);
7699                 start_link(ppd);
7700         }
7701
7702         return link_downgraded;
7703 }
7704
7705 /*
7706  * Handle a link downgrade interrupt from the 8051.
7707  *
7708  * This is a work-queue function outside of the interrupt.
7709  */
7710 void handle_link_downgrade(struct work_struct *work)
7711 {
7712         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7713                                                         link_downgrade_work);
7714
7715         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7716         if (apply_link_downgrade_policy(ppd, true))
7717                 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7718 }
7719
7720 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7721 {
7722         return flag_string(buf, buf_len, flags, dcc_err_flags,
7723                 ARRAY_SIZE(dcc_err_flags));
7724 }
7725
7726 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7727 {
7728         return flag_string(buf, buf_len, flags, lcb_err_flags,
7729                 ARRAY_SIZE(lcb_err_flags));
7730 }
7731
7732 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7733 {
7734         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7735                 ARRAY_SIZE(dc8051_err_flags));
7736 }
7737
7738 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7739 {
7740         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7741                 ARRAY_SIZE(dc8051_info_err_flags));
7742 }
7743
7744 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7745 {
7746         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7747                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7748 }
7749
7750 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7751 {
7752         struct hfi1_pportdata *ppd = dd->pport;
7753         u64 info, err, host_msg;
7754         int queue_link_down = 0;
7755         char buf[96];
7756
7757         /* look at the flags */
7758         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7759                 /* 8051 information set by firmware */
7760                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7761                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7762                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7763                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7764                 host_msg = (info >>
7765                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7766                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7767
7768                 /*
7769                  * Handle error flags.
7770                  */
7771                 if (err & FAILED_LNI) {
7772                         /*
7773                          * LNI error indications are cleared by the 8051
7774                          * only when starting polling.  Only pay attention
7775                          * to them when in the states that occur during
7776                          * LNI.
7777                          */
7778                         if (ppd->host_link_state
7779                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7780                                 queue_link_down = 1;
7781                                 dd_dev_info(dd, "Link error: %s\n",
7782                                             dc8051_info_err_string(buf,
7783                                                                    sizeof(buf),
7784                                                                    err &
7785                                                                    FAILED_LNI));
7786                         }
7787                         err &= ~(u64)FAILED_LNI;
7788                 }
7789                 /* unknown frames can happen during LNI, just count */
7790                 if (err & UNKNOWN_FRAME) {
7791                         ppd->unknown_frame_count++;
7792                         err &= ~(u64)UNKNOWN_FRAME;
7793                 }
7794                 if (err) {
7795                         /* report remaining errors, but do not do anything */
7796                         dd_dev_err(dd, "8051 info error: %s\n",
7797                                    dc8051_info_err_string(buf, sizeof(buf),
7798                                                           err));
7799                 }
7800
7801                 /*
7802                  * Handle host message flags.
7803                  */
7804                 if (host_msg & HOST_REQ_DONE) {
7805                         /*
7806                          * Presently, the driver does a busy wait for
7807                          * host requests to complete.  This is only an
7808                          * informational message.
7809                          * NOTE: The 8051 clears the host message
7810                          * information *on the next 8051 command*.
7811                          * Therefore, when linkup is achieved,
7812                          * this flag will still be set.
7813                          */
7814                         host_msg &= ~(u64)HOST_REQ_DONE;
7815                 }
7816                 if (host_msg & BC_SMA_MSG) {
7817                         queue_work(ppd->link_wq, &ppd->sma_message_work);
7818                         host_msg &= ~(u64)BC_SMA_MSG;
7819                 }
7820                 if (host_msg & LINKUP_ACHIEVED) {
7821                         dd_dev_info(dd, "8051: Link up\n");
7822                         queue_work(ppd->link_wq, &ppd->link_up_work);
7823                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7824                 }
7825                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7826                         handle_8051_request(ppd);
7827                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7828                 }
7829                 if (host_msg & VERIFY_CAP_FRAME) {
7830                         queue_work(ppd->link_wq, &ppd->link_vc_work);
7831                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7832                 }
7833                 if (host_msg & LINK_GOING_DOWN) {
7834                         const char *extra = "";
7835                         /* no downgrade action needed if going down */
7836                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7837                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7838                                 extra = " (ignoring downgrade)";
7839                         }
7840                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7841                         queue_link_down = 1;
7842                         host_msg &= ~(u64)LINK_GOING_DOWN;
7843                 }
7844                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7845                         queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7846                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7847                 }
7848                 if (host_msg) {
7849                         /* report remaining messages, but do not do anything */
7850                         dd_dev_info(dd, "8051 info host message: %s\n",
7851                                     dc8051_info_host_msg_string(buf,
7852                                                                 sizeof(buf),
7853                                                                 host_msg));
7854                 }
7855
7856                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7857         }
7858         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7859                 /*
7860                  * Lost the 8051 heartbeat.  If this happens, we
7861                  * receive constant interrupts about it.  Disable
7862                  * the interrupt after the first.
7863                  */
7864                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7865                 write_csr(dd, DC_DC8051_ERR_EN,
7866                           read_csr(dd, DC_DC8051_ERR_EN) &
7867                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7868
7869                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7870         }
7871         if (reg) {
7872                 /* report the error, but do not do anything */
7873                 dd_dev_err(dd, "8051 error: %s\n",
7874                            dc8051_err_string(buf, sizeof(buf), reg));
7875         }
7876
7877         if (queue_link_down) {
7878                 /*
7879                  * If the link is already going down or disabled, do not
7880                  * queue a link down request.  Likewise, if a link down
7881                  * work item is already queued, do not queue another one.
7882                  */
7883                 if ((ppd->host_link_state &
7884                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7885                     ppd->link_enabled == 0) {
7886                         dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7887                                     __func__, ppd->host_link_state,
7888                                     ppd->link_enabled);
7889                 } else {
7890                         if (xchg(&ppd->is_link_down_queued, 1) == 1)
7891                                 dd_dev_info(dd,
7892                                             "%s: link down request already queued\n",
7893                                             __func__);
7894                         else
7895                                 queue_work(ppd->link_wq, &ppd->link_down_work);
7896                 }
7897         }
7898 }
7899
7900 static const char * const fm_config_txt[] = {
7901 [0] =
7902         "BadHeadDist: Distance violation between two head flits",
7903 [1] =
7904         "BadTailDist: Distance violation between two tail flits",
7905 [2] =
7906         "BadCtrlDist: Distance violation between two credit control flits",
7907 [3] =
7908         "BadCrdAck: Credits return for unsupported VL",
7909 [4] =
7910         "UnsupportedVLMarker: Received VL Marker",
7911 [5] =
7912         "BadPreempt: Exceeded the preemption nesting level",
7913 [6] =
7914         "BadControlFlit: Received unsupported control flit",
7915 /* no 7 */
7916 [8] =
7917         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7918 };
7919
7920 static const char * const port_rcv_txt[] = {
7921 [1] =
7922         "BadPktLen: Illegal PktLen",
7923 [2] =
7924         "PktLenTooLong: Packet longer than PktLen",
7925 [3] =
7926         "PktLenTooShort: Packet shorter than PktLen",
7927 [4] =
7928         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7929 [5] =
7930         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7931 [6] =
7932         "BadL2: Illegal L2 opcode",
7933 [7] =
7934         "BadSC: Unsupported SC",
7935 [9] =
7936         "BadRC: Illegal RC",
7937 [11] =
7938         "PreemptError: Preempting with same VL",
7939 [12] =
7940         "PreemptVL15: Preempting a VL15 packet",
7941 };
7942
7943 #define OPA_LDR_FMCONFIG_OFFSET 16
7944 #define OPA_LDR_PORTRCV_OFFSET 0
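     /*
      * port_error_action packs per-error "bounce the link" enable bits:
      * FMConfig error codes are tested at bit (OPA_LDR_FMCONFIG_OFFSET +
      * info) and PortRcv error codes at bit (OPA_LDR_PORTRCV_OFFSET +
      * info), matching the (1 << (OFFSET + info)) checks below.
      */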
7945 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7946 {
7947         u64 info, hdr0, hdr1;
7948         const char *extra;
7949         char buf[96];
7950         struct hfi1_pportdata *ppd = dd->pport;
7951         u8 lcl_reason = 0;
7952         int do_bounce = 0;
7953
7954         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7955                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7956                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7957                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7958                         /* set status bit */
7959                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7960                 }
7961                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7962         }
7963
7964         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7965                 struct hfi1_pportdata *ppd = dd->pport;
7966                 /* this counter saturates at (2^32) - 1 */
7967                 if (ppd->link_downed < (u32)UINT_MAX)
7968                         ppd->link_downed++;
7969                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7970         }
7971
7972         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7973                 u8 reason_valid = 1;
7974
7975                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7976                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7977                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7978                         /* set status bit */
7979                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7980                 }
7981                 switch (info) {
7982                 case 0:
7983                 case 1:
7984                 case 2:
7985                 case 3:
7986                 case 4:
7987                 case 5:
7988                 case 6:
7989                         extra = fm_config_txt[info];
7990                         break;
7991                 case 8:
7992                         extra = fm_config_txt[info];
7993                         if (ppd->port_error_action &
7994                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7995                                 do_bounce = 1;
7996                                 /*
7997                                  * lcl_reason cannot be derived from info
7998                                  * for this error
7999                                  */
8000                                 lcl_reason =
8001                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
8002                         }
8003                         break;
8004                 default:
8005                         reason_valid = 0;
8006                         snprintf(buf, sizeof(buf), "reserved%lld", info);
8007                         extra = buf;
8008                         break;
8009                 }
8010
8011                 if (reason_valid && !do_bounce) {
8012                         do_bounce = ppd->port_error_action &
8013                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
8014                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
8015                 }
8016
8017                 /* just report this */
8018                 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8019                                         extra);
8020                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8021         }
8022
8023         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8024                 u8 reason_valid = 1;
8025
8026                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8027                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8028                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8029                 if (!(dd->err_info_rcvport.status_and_code &
8030                       OPA_EI_STATUS_SMASK)) {
8031                         dd->err_info_rcvport.status_and_code =
8032                                 info & OPA_EI_CODE_SMASK;
8033                         /* set status bit */
8034                         dd->err_info_rcvport.status_and_code |=
8035                                 OPA_EI_STATUS_SMASK;
8036                         /*
8037                          * save first 2 flits in the packet that caused
8038                          * the error
8039                          */
8040                         dd->err_info_rcvport.packet_flit1 = hdr0;
8041                         dd->err_info_rcvport.packet_flit2 = hdr1;
8042                 }
8043                 switch (info) {
8044                 case 1:
8045                 case 2:
8046                 case 3:
8047                 case 4:
8048                 case 5:
8049                 case 6:
8050                 case 7:
8051                 case 9:
8052                 case 11:
8053                 case 12:
8054                         extra = port_rcv_txt[info];
8055                         break;
8056                 default:
8057                         reason_valid = 0;
8058                         snprintf(buf, sizeof(buf), "reserved%lld", info);
8059                         extra = buf;
8060                         break;
8061                 }
8062
8063                 if (reason_valid && !do_bounce) {
8064                         do_bounce = ppd->port_error_action &
8065                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8066                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8067                 }
8068
8069                 /* just report this */
8070                 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8071                                         "               hdr0 0x%llx, hdr1 0x%llx\n",
8072                                         extra, hdr0, hdr1);
8073
8074                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8075         }
8076
8077         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8078                 /* informative only */
8079                 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8080                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8081         }
8082         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8083                 /* informative only */
8084                 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8085                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8086         }
8087
8088         if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8089                 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8090
8091         /* report any remaining errors */
8092         if (reg)
8093                 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8094                                         dcc_err_string(buf, sizeof(buf), reg));
8095
8096         if (lcl_reason == 0)
8097                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8098
8099         if (do_bounce) {
8100                 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8101                                         __func__);
8102                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8103                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8104         }
8105 }
8106
8107 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8108 {
8109         char buf[96];
8110
8111         dd_dev_info(dd, "LCB Error: %s\n",
8112                     lcb_err_string(buf, sizeof(buf), reg));
8113 }
8114
8115 /*
8116  * CCE block DC interrupt.  Source is < 8.
8117  */
8118 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8119 {
8120         const struct err_reg_info *eri = &dc_errs[source];
8121
8122         if (eri->handler) {
8123                 interrupt_clear_down(dd, 0, eri);
8124         } else if (source == 3 /* dc_lbm_int */) {
8125                 /*
8126                  * This indicates that a parity error has occurred on the
8127                  * is a single pulse: there is no associated error flag,
8128                  * and it is non-maskable.  This is because, if a parity
8129                  * error occurs on the request, the request is dropped.
8130                  * error occurs on the request the request is dropped.
8131                  * This should never occur, but it is nice to know if it
8132                  * ever does.
8133                  */
8134                 dd_dev_err(dd, "Parity error in DC LBM block\n");
8135         } else {
8136                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8137         }
8138 }
8139
8140 /*
8141  * TX block send credit interrupt.  Source is < 160.
8142  */
8143 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8144 {
8145         sc_group_release_update(dd, source);
8146 }
8147
8148 /*
8149  * TX block SDMA interrupt.  Source is < 48.
8150  *
8151  * SDMA interrupts are grouped by type:
8152  *
8153  *       0 -  N-1 = SDma
8154  *       N - 2N-1 = SDmaProgress
8155  *      2N - 3N-1 = SDmaIdle
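      *
      * For example, if TXE_NUM_SDMA_ENGINES is 16, source 20 decodes
      * to what = 1 (SDmaProgress) for engine which = 4.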
8156  */
8157 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8158 {
8159         /* what interrupt */
8160         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8161         /* which engine */
8162         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8163
8164 #ifdef CONFIG_SDMA_VERBOSITY
8165         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8166                    slashstrip(__FILE__), __LINE__, __func__);
8167         sdma_dumpstate(&dd->per_sdma[which]);
8168 #endif
8169
8170         if (likely(what < 3 && which < dd->num_sdma)) {
8171                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8172         } else {
8173                 /* should not happen */
8174                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8175         }
8176 }
8177
8178 /**
8179  * is_rcv_avail_int() - User receive context available IRQ handler
8180  * @dd: valid dd
8181  * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8182  *
8183  * RX block receive available interrupt.  Source is < 160.
8184  *
8185  * This is the general interrupt handler for user (PSM) receive contexts,
8186  * and can only be used for non-threaded IRQs.
8187  */
8188 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8189 {
8190         struct hfi1_ctxtdata *rcd;
8191         char *err_detail;
8192
8193         if (likely(source < dd->num_rcv_contexts)) {
8194                 rcd = hfi1_rcd_get_by_index(dd, source);
8195                 if (rcd) {
8196                         handle_user_interrupt(rcd);
8197                         hfi1_rcd_put(rcd);
8198                         return; /* OK */
8199                 }
8200                 /* received an interrupt, but no rcd */
8201                 err_detail = "dataless";
8202         } else {
8203                 /* received an interrupt, but are not using that context */
8204                 err_detail = "out of range";
8205         }
8206         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8207                    err_detail, source);
8208 }
8209
8210 /**
8211  * is_rcv_urgent_int() - User receive context urgent IRQ handler
8212  * @dd: valid dd
8213  * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8214  *
8215  * RX block receive urgent interrupt.  Source is < 160.
8216  *
8217  * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8218  */
8219 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8220 {
8221         struct hfi1_ctxtdata *rcd;
8222         char *err_detail;
8223
8224         if (likely(source < dd->num_rcv_contexts)) {
8225                 rcd = hfi1_rcd_get_by_index(dd, source);
8226                 if (rcd) {
8227                         handle_user_interrupt(rcd);
8228                         hfi1_rcd_put(rcd);
8229                         return; /* OK */
8230                 }
8231                 /* received an interrupt, but no rcd */
8232                 err_detail = "dataless";
8233         } else {
8234                 /* received an interrupt, but are not using that context */
8235                 err_detail = "out of range";
8236         }
8237         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8238                    err_detail, source);
8239 }
8240
8241 /*
8242  * Reserved range interrupt.  Should not be called in normal operation.
8243  */
8244 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8245 {
8246         char name[64];
8247
8248         dd_dev_err(dd, "unexpected %s interrupt\n",
8249                    is_reserved_name(name, sizeof(name), source));
8250 }
8251
8252 static const struct is_table is_table[] = {
8253 /*
8254  * start                 end
8255  *                              name func               interrupt func
8256  */
8257 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8258                                 is_misc_err_name,       is_misc_err_int },
8259 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8260                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
8261 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8262                                 is_sendctxt_err_name,   is_sendctxt_err_int },
8263 { IS_SDMA_START,             IS_SDMA_IDLE_END,
8264                                 is_sdma_eng_name,       is_sdma_eng_int },
8265 { IS_VARIOUS_START,          IS_VARIOUS_END,
8266                                 is_various_name,        is_various_int },
8267 { IS_DC_START,       IS_DC_END,
8268                                 is_dc_name,             is_dc_int },
8269 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8270                                 is_rcv_avail_name,      is_rcv_avail_int },
8271 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8272                                 is_rcv_urgent_name,     is_rcv_urgent_int },
8273 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8274                                 is_send_credit_name,    is_send_credit_int},
8275 { IS_RESERVED_START,     IS_RESERVED_END,
8276                                 is_reserved_name,       is_reserved_int},
8277 };
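     /*
      * Note: is_interrupt() below walks this table in order and dispatches
      * on the first entry whose end covers the source, so entries must stay
      * sorted by ascending interrupt source range.
      */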
8278
8279 /*
8280  * Interrupt source interrupt - called when the given source has an interrupt.
8281  * Source is a bit index into an array of 64-bit integers.
8282  */
8283 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8284 {
8285         const struct is_table *entry;
8286
8287         /* avoids a double compare by walking the table in-order */
8288         for (entry = &is_table[0]; entry->is_name; entry++) {
8289                 if (source <= entry->end) {
8290                         trace_hfi1_interrupt(dd, entry, source);
8291                         entry->is_int(dd, source - entry->start);
8292                         return;
8293                 }
8294         }
8295         /* fell off the end */
8296         dd_dev_err(dd, "invalid interrupt source %u\n", source);
8297 }
8298
8299 /**
8300  * general_interrupt() - General interrupt handler
8301  * @irq: MSIx IRQ vector
8302  * @data: hfi1 devdata
8303  *
8304  * This is able to correctly handle all non-threaded interrupts.  Receive
8305  * context DATA IRQs are threaded and are not supported by this handler.
8306  *
8307  */
8308 irqreturn_t general_interrupt(int irq, void *data)
8309 {
8310         struct hfi1_devdata *dd = data;
8311         u64 regs[CCE_NUM_INT_CSRS];
8312         u32 bit;
8313         int i;
8314         irqreturn_t handled = IRQ_NONE;
8315
8316         this_cpu_inc(*dd->int_counter);
8317
8318         /* phase 1: scan and clear all handled interrupts */
8319         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8320                 if (dd->gi_mask[i] == 0) {
8321                         regs[i] = 0;    /* used later */
8322                         continue;
8323                 }
8324                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8325                                 dd->gi_mask[i];
8326                 /* only clear if anything is set */
8327                 if (regs[i])
8328                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8329         }
8330
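             /*
              * regs[] is treated as a single bitmap of
              * CCE_NUM_INT_CSRS * 64 interrupt sources; each set bit is
              * dispatched by source number through is_interrupt().
              */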
8331         /* phase 2: call the appropriate handler */
8332         for_each_set_bit(bit, (unsigned long *)&regs[0],
8333                          CCE_NUM_INT_CSRS * 64) {
8334                 is_interrupt(dd, bit);
8335                 handled = IRQ_HANDLED;
8336         }
8337
8338         return handled;
8339 }
8340
8341 irqreturn_t sdma_interrupt(int irq, void *data)
8342 {
8343         struct sdma_engine *sde = data;
8344         struct hfi1_devdata *dd = sde->dd;
8345         u64 status;
8346
8347 #ifdef CONFIG_SDMA_VERBOSITY
8348         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8349                    slashstrip(__FILE__), __LINE__, __func__);
8350         sdma_dumpstate(sde);
8351 #endif
8352
8353         this_cpu_inc(*dd->int_counter);
8354
8355         /* This read_csr is really bad in the hot path */
8356         status = read_csr(dd,
8357                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8358                           & sde->imask;
8359         if (likely(status)) {
8360                 /* clear the interrupt(s) */
8361                 write_csr(dd,
8362                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8363                           status);
8364
8365                 /* handle the interrupt(s) */
8366                 sdma_engine_interrupt(sde, status);
8367         } else {
8368                 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8369                                         sde->this_idx);
8370         }
8371         return IRQ_HANDLED;
8372 }
8373
8374 /*
8375  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8376  * to ensure that the write completed.  This does NOT guarantee that
8377  * queued DMA writes to memory from the chip are pushed.
8378  */
8379 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8380 {
8381         struct hfi1_devdata *dd = rcd->dd;
8382         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8383
8384         write_csr(dd, addr, rcd->imask);
8385         /* force the above write on the chip and get a value back */
8386         (void)read_csr(dd, addr);
8387 }
8388
8389 /* force the receive interrupt */
8390 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8391 {
8392         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8393 }
8394
8395 /*
8396  * Return non-zero if a packet is present.
8397  *
8398  * This routine is called when rechecking for packets after the RcvAvail
8399  * interrupt has been cleared down.  First, do a quick check of memory for
8400  * a packet present.  If not found, use an expensive CSR read of the context
8401  * tail to determine the actual tail.  The CSR read is necessary because there
8402  * is no method to push pending DMAs to memory other than an interrupt and we
8403  * are trying to determine if we need to force an interrupt.
8404  */
8405 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8406 {
8407         u32 tail;
8408
8409         if (hfi1_packet_present(rcd))
8410                 return 1;
8411
8412         /* fall back to a CSR read, correct independent of DMA_RTAIL */
8413         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8414         return hfi1_rcd_head(rcd) != tail;
8415 }
8416
8417 /**
8418  * Common code for receive context interrupt handlers.
8419  * Update traces, increment the kernel IRQ counter and
8420  * set up ASPM when needed.
8421  */
8422 static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
8423 {
8424         struct hfi1_devdata *dd = rcd->dd;
8425
8426         trace_hfi1_receive_interrupt(dd, rcd);
8427         this_cpu_inc(*dd->int_counter);
8428         aspm_ctx_disable(rcd);
8429 }
8430
8431 /**
8432  * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
8433  * when there are packets present in the queue.  When calling
8434  * with interrupts enabled, please use hfi1_rcd_eoi_intr().
8435  *
8436  * @rcd: valid receive context
8437  */
8438 static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
8439 {
8440         clear_recv_intr(rcd);
8441         if (check_packet_present(rcd))
8442                 force_recv_intr(rcd);
8443 }
8444
8445 /**
8446  * hfi1_rcd_eoi_intr() - End of Interrupt processing action
8447  *
8448  * @rcd: Ptr to hfi1_ctxtdata of receive context
8449  *
8450  *  Hold IRQs so we can safely clear the interrupt and
8451  *  recheck for a packet that may have arrived after the previous
8452  *  check and the interrupt clear.  If a packet arrived, force another
8453  *  interrupt. This routine can be called at the end of receive packet
8454  *  processing in interrupt service routines, interrupt service threads
8455  *  and softirqs.
8456  */
8457 static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
8458 {
8459         unsigned long flags;
8460
8461         local_irq_save(flags);
8462         __hfi1_rcd_eoi_intr(rcd);
8463         local_irq_restore(flags);
8464 }
8465
8466 /*
8467  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8468  * This routine will try to handle packets immediately (latency), but if
8469  * it finds too many, it will invoke the thread handler (bandwidth).  The
8470  * chip receive interrupt is *not* cleared down until this or the thread (if
8471  * invoked) is finished.  The intent is to avoid extra interrupts while we
8472  * are processing packets anyway.
8473  */
8474 irqreturn_t receive_context_interrupt(int irq, void *data)
8475 {
8476         struct hfi1_ctxtdata *rcd = data;
8477         int disposition;
8478
8479         receive_interrupt_common(rcd);
8480
8481         /* receive interrupt remains blocked while processing packets */
8482         disposition = rcd->do_interrupt(rcd, 0);
8483
8484         /*
8485          * Too many packets were seen while processing packets in this
8486          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8487          * remains blocked.
8488          */
8489         if (disposition == RCV_PKT_LIMIT)
8490                 return IRQ_WAKE_THREAD;
8491
8492         __hfi1_rcd_eoi_intr(rcd);
8493         return IRQ_HANDLED;
8494 }
8495
8496 /*
8497  * Receive packet thread handler.  This expects to be invoked with the
8498  * receive interrupt still blocked.
8499  */
8500 irqreturn_t receive_context_thread(int irq, void *data)
8501 {
8502         struct hfi1_ctxtdata *rcd = data;
8503
8504         /* receive interrupt is still blocked from the IRQ handler */
8505         (void)rcd->do_interrupt(rcd, 1);
8506
8507         hfi1_rcd_eoi_intr(rcd);
8508
8509         return IRQ_HANDLED;
8510 }
8511
8512 /* ========================================================================= */
8513
8514 u32 read_physical_state(struct hfi1_devdata *dd)
8515 {
8516         u64 reg;
8517
8518         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8519         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8520                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8521 }
8522
8523 u32 read_logical_state(struct hfi1_devdata *dd)
8524 {
8525         u64 reg;
8526
8527         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8528         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8529                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8530 }
8531
8532 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8533 {
8534         u64 reg;
8535
8536         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8537         /* clear current state, set new state */
8538         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8539         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8540         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8541 }
8542
8543 /*
8544  * Use the 8051 to read a LCB CSR.
8545  */
8546 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8547 {
8548         u32 regno;
8549         int ret;
8550
8551         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8552                 if (acquire_lcb_access(dd, 0) == 0) {
8553                         *data = read_csr(dd, addr);
8554                         release_lcb_access(dd, 0);
8555                         return 0;
8556                 }
8557                 return -EBUSY;
8558         }
8559
8560         /* register is an index of LCB registers: (offset - base) / 8 */
8561         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8562         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8563         if (ret != HCMD_SUCCESS)
8564                 return -EBUSY;
8565         return 0;
8566 }
8567
8568 /*
8569  * Provide a cache for some of the LCB registers in case the LCB is
8570  * unavailable.
8571  * (The LCB is unavailable in certain link states, for example.)
8572  */
8573 struct lcb_datum {
8574         u32 off;
8575         u64 val;
8576 };
8577
8578 static struct lcb_datum lcb_cache[] = {
8579         { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8580         { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8581         { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8582 };
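     /*
      * Only the offsets listed above are served from the cache;
      * read_lcb_cache() warns and fails for any other offset.
      */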
8583
8584 static void update_lcb_cache(struct hfi1_devdata *dd)
8585 {
8586         int i;
8587         int ret;
8588         u64 val;
8589
8590         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8591                 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8592
8593                 /* Update if we get good data */
8594                 if (likely(ret != -EBUSY))
8595                         lcb_cache[i].val = val;
8596         }
8597 }
8598
8599 static int read_lcb_cache(u32 off, u64 *val)
8600 {
8601         int i;
8602
8603         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8604                 if (lcb_cache[i].off == off) {
8605                         *val = lcb_cache[i].val;
8606                         return 0;
8607                 }
8608         }
8609
8610         pr_warn("%s bad offset 0x%x\n", __func__, off);
8611         return -1;
8612 }
8613
8614 /*
8615  * Read an LCB CSR.  Access may not be in host control, so check.
8616  * Return 0 on success, -EBUSY on failure.
8617  */
8618 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8619 {
8620         struct hfi1_pportdata *ppd = dd->pport;
8621
8622         /* if up, go through the 8051 for the value */
8623         if (ppd->host_link_state & HLS_UP)
8624                 return read_lcb_via_8051(dd, addr, data);
8625         /* if going up or down, check the cache, otherwise, no access */
8626         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8627                 if (read_lcb_cache(addr, data))
8628                         return -EBUSY;
8629                 return 0;
8630         }
8631
8632         /* otherwise, host has access */
8633         *data = read_csr(dd, addr);
8634         return 0;
8635 }
8636
8637 /*
8638  * Use the 8051 to write a LCB CSR.
8639  */
8640 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8641 {
8642         u32 regno;
8643         int ret;
8644
8645         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8646             (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8647                 if (acquire_lcb_access(dd, 0) == 0) {
8648                         write_csr(dd, addr, data);
8649                         release_lcb_access(dd, 0);
8650                         return 0;
8651                 }
8652                 return -EBUSY;
8653         }
8654
8655         /* register is an index of LCB registers: (offset - base) / 8 */
8656         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8657         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8658         if (ret != HCMD_SUCCESS)
8659                 return -EBUSY;
8660         return 0;
8661 }
8662
8663 /*
8664  * Write an LCB CSR.  Access may not be in host control, so check.
8665  * Return 0 on success, -EBUSY on failure.
8666  */
8667 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8668 {
8669         struct hfi1_pportdata *ppd = dd->pport;
8670
8671         /* if up, go through the 8051 for the value */
8672         if (ppd->host_link_state & HLS_UP)
8673                 return write_lcb_via_8051(dd, addr, data);
8674         /* if going up or down, no access */
8675         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8676                 return -EBUSY;
8677         /* otherwise, host has access */
8678         write_csr(dd, addr, data);
8679         return 0;
8680 }
8681
8682 /*
8683  * Returns:
8684  *      < 0 = Linux error, not able to get access
8685  *      > 0 = 8051 command RETURN_CODE
8686  */
8687 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8688                            u64 *out_data)
8689 {
8690         u64 reg, completed;
8691         int return_code;
8692         unsigned long timeout;
8693
8694         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8695
8696         mutex_lock(&dd->dc8051_lock);
8697
8698         /* We can't send any commands to the 8051 if it's in reset */
8699         if (dd->dc_shutdown) {
8700                 return_code = -ENODEV;
8701                 goto fail;
8702         }
8703
8704         /*
8705          * If an 8051 host command timed out previously, then the 8051 is
8706          * stuck.
8707          *
8708          * On first timeout, attempt to reset and restart the entire DC
8709          * block (including 8051). (Is this too big of a hammer?)
8710          *
8711          * If the 8051 times out a second time, the reset did not bring it
8712          * back to healthy life. In that case, fail any subsequent commands.
8713          */
8714         if (dd->dc8051_timed_out) {
8715                 if (dd->dc8051_timed_out > 1) {
8716                         dd_dev_err(dd,
8717                                    "Previous 8051 host command timed out, skipping command %u\n",
8718                                    type);
8719                         return_code = -ENXIO;
8720                         goto fail;
8721                 }
8722                 _dc_shutdown(dd);
8723                 _dc_start(dd);
8724         }
8725
8726         /*
8727          * If there is no timeout, then the 8051 command interface is
8728          * waiting for a command.
8729          */
8730
8731         /*
8732          * When writing an LCB CSR, out_data contains the full value
8733          * to be written, while in_data contains the relative LCB
8734          * address in 7:0.  Do the work here, rather than in the caller,
8735          * of distributing the write data to where it needs to go:
8736          *
8737          * Write data
8738          *   39:00 -> in_data[47:8]
8739          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8740          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
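              *
              * For example, a write value of 0x1122334455667788 is split as:
              * 0x4455667788 into in_data[47:8], 0x33 into RETURN_CODE, and
              * 0x1122 into RSP_DATA.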
8741          */
8742         if (type == HCMD_WRITE_LCB_CSR) {
8743                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8744                 /* must preserve COMPLETED - it is tied to hardware */
8745                 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8746                 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8747                 reg |= ((((*out_data) >> 40) & 0xff) <<
8748                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8749                       | ((((*out_data) >> 48) & 0xffff) <<
8750                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8751                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8752         }
8753
8754         /*
8755          * Do two writes: the first to stabilize the type and req_data, the
8756          * second to activate.
8757          */
8758         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8759                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8760                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8761                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8762         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8763         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8764         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8765
8766         /* wait for completion, alternate: interrupt */
8767         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8768         while (1) {
8769                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8770                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8771                 if (completed)
8772                         break;
8773                 if (time_after(jiffies, timeout)) {
8774                         dd->dc8051_timed_out++;
8775                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8776                         if (out_data)
8777                                 *out_data = 0;
8778                         return_code = -ETIMEDOUT;
8779                         goto fail;
8780                 }
8781                 udelay(2);
8782         }
8783
8784         if (out_data) {
8785                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8786                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8787                 if (type == HCMD_READ_LCB_CSR) {
8788                         /* top 16 bits are in a different register */
8789                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8790                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8791                                 << (48
8792                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8793                 }
8794         }
8795         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8796                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8797         dd->dc8051_timed_out = 0;
8798         /*
8799          * Clear command for next user.
8800          */
8801         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8802
8803 fail:
8804         mutex_unlock(&dd->dc8051_lock);
8805         return return_code;
8806 }
8807
8808 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8809 {
8810         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8811 }
8812
8813 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8814                      u8 lane_id, u32 config_data)
8815 {
8816         u64 data;
8817         int ret;
8818
8819         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8820                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8821                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8822         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8823         if (ret != HCMD_SUCCESS) {
8824                 dd_dev_err(dd,
8825                            "load 8051 config: field id %d, lane %d, err %d\n",
8826                            (int)field_id, (int)lane_id, ret);
8827         }
8828         return ret;
8829 }
8830
8831 /*
8832  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8833  * set the result, even on error.
8834  * Return 0 on success, -errno on failure
8835  */
8836 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8837                      u32 *result)
8838 {
8839         u64 big_data;
8840         u32 addr;
8841         int ret;
8842
8843         /* address start depends on the lane_id */
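             /*
              * The general fields occupy the start of the 8051 config RAM,
              * 4 bytes per field, followed by the per-lane field blocks for
              * lanes 0-3.  A lane_id outside 0-3 (as used with
              * GENERAL_CONFIG elsewhere in this file) selects the general
              * area at offset 0.
              */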
8844         if (lane_id < 4)
8845                 addr = (4 * NUM_GENERAL_FIELDS)
8846                         + (lane_id * 4 * NUM_LANE_FIELDS);
8847         else
8848                 addr = 0;
8849         addr += field_id * 4;
8850
8851         /* read is in 8-byte chunks, hardware will truncate the address down */
8852         ret = read_8051_data(dd, addr, 8, &big_data);
8853
8854         if (ret == 0) {
8855                 /* extract the 4 bytes we want */
8856                 if (addr & 0x4)
8857                         *result = (u32)(big_data >> 32);
8858                 else
8859                         *result = (u32)big_data;
8860         } else {
8861                 *result = 0;
8862                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8863                            __func__, lane_id, field_id);
8864         }
8865
8866         return ret;
8867 }
8868
8869 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8870                               u8 continuous)
8871 {
8872         u32 frame;
8873
8874         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8875                 | power_management << POWER_MANAGEMENT_SHIFT;
8876         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8877                                 GENERAL_CONFIG, frame);
8878 }
8879
8880 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8881                                  u16 vl15buf, u8 crc_sizes)
8882 {
8883         u32 frame;
8884
8885         frame = (u32)vau << VAU_SHIFT
8886                 | (u32)z << Z_SHIFT
8887                 | (u32)vcu << VCU_SHIFT
8888                 | (u32)vl15buf << VL15BUF_SHIFT
8889                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8890         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8891                                 GENERAL_CONFIG, frame);
8892 }
8893
8894 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8895                                     u8 *flag_bits, u16 *link_widths)
8896 {
8897         u32 frame;
8898
8899         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8900                          &frame);
8901         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8902         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8903         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8904 }
8905
8906 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8907                                     u8 misc_bits,
8908                                     u8 flag_bits,
8909                                     u16 link_widths)
8910 {
8911         u32 frame;
8912
8913         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8914                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8915                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8916         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8917                      frame);
8918 }
8919
8920 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8921                                  u8 device_rev)
8922 {
8923         u32 frame;
8924
8925         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8926                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8927         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8928 }
8929
8930 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8931                                   u8 *device_rev)
8932 {
8933         u32 frame;
8934
8935         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8936         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8937         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8938                         & REMOTE_DEVICE_REV_MASK;
8939 }
8940
8941 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8942 {
8943         u32 frame;
8944         u32 mask;
8945
8946         mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8947         read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8948         /* Clear, then set field */
8949         frame &= ~mask;
8950         frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8951         return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8952                                 frame);
8953 }
8954
8955 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8956                       u8 *ver_patch)
8957 {
8958         u32 frame;
8959
8960         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8961         *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8962                 STS_FM_VERSION_MAJOR_MASK;
8963         *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8964                 STS_FM_VERSION_MINOR_MASK;
8965
8966         read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8967         *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8968                 STS_FM_VERSION_PATCH_MASK;
8969 }
8970
8971 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8972                                u8 *continuous)
8973 {
8974         u32 frame;
8975
8976         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8977         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8978                                         & POWER_MANAGEMENT_MASK;
8979         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8980                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8981 }
8982
8983 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8984                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8985 {
8986         u32 frame;
8987
8988         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8989         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8990         *z = (frame >> Z_SHIFT) & Z_MASK;
8991         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8992         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8993         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8994 }
8995
8996 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8997                                       u8 *remote_tx_rate,
8998                                       u16 *link_widths)
8999 {
9000         u32 frame;
9001
9002         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
9003                          &frame);
9004         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
9005                                 & REMOTE_TX_RATE_MASK;
9006         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
9007 }
9008
9009 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
9010 {
9011         u32 frame;
9012
9013         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
9014         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
9015 }
9016
9017 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
9018 {
9019         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
9020 }
9021
9022 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
9023 {
9024         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
9025 }
9026
9027 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9028 {
9029         u32 frame;
9030         int ret;
9031
9032         *link_quality = 0;
9033         if (dd->pport->host_link_state & HLS_UP) {
9034                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9035                                        &frame);
9036                 if (ret == 0)
9037                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
9038                                                 & LINK_QUALITY_MASK;
9039         }
9040 }
9041
9042 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9043 {
9044         u32 frame;
9045
9046         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9047         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9048 }
9049
9050 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9051 {
9052         u32 frame;
9053
9054         read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9055         *ldr = (frame & 0xff);
9056 }
9057
9058 static int read_tx_settings(struct hfi1_devdata *dd,
9059                             u8 *enable_lane_tx,
9060                             u8 *tx_polarity_inversion,
9061                             u8 *rx_polarity_inversion,
9062                             u8 *max_rate)
9063 {
9064         u32 frame;
9065         int ret;
9066
9067         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9068         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9069                                 & ENABLE_LANE_TX_MASK;
9070         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9071                                 & TX_POLARITY_INVERSION_MASK;
9072         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9073                                 & RX_POLARITY_INVERSION_MASK;
9074         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9075         return ret;
9076 }
9077
9078 static int write_tx_settings(struct hfi1_devdata *dd,
9079                              u8 enable_lane_tx,
9080                              u8 tx_polarity_inversion,
9081                              u8 rx_polarity_inversion,
9082                              u8 max_rate)
9083 {
9084         u32 frame;
9085
9086         /* no need to mask, all variable sizes match field widths */
9087         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9088                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9089                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9090                 | max_rate << MAX_RATE_SHIFT;
9091         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9092 }
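
/*
 * Example (sketch): TX settings are updated with a read-modify-write so
 * that only the field of interest changes, as set_local_link_attributes()
 * does further down.  Here, all four TX lanes are enabled while the
 * polarity inversion settings and rate are preserved.
 *
 *      u8 lanes, tx_pol, rx_pol, rate;
 *
 *      if (!read_tx_settings(dd, &lanes, &tx_pol, &rx_pol, &rate))
 *              write_tx_settings(dd, 0xF, tx_pol, rx_pol, rate);
 */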
9093
9094 /*
9095  * Read an idle LCB message.
9096  *
9097  * Returns 0 on success, -EINVAL on error
9098  */
9099 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9100 {
9101         int ret;
9102
9103         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9104         if (ret != HCMD_SUCCESS) {
9105                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9106                            (u32)type, ret);
9107                 return -EINVAL;
9108         }
9109         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9110         /* return only the payload as we already know the type */
9111         *data_out >>= IDLE_PAYLOAD_SHIFT;
9112         return 0;
9113 }
9114
9115 /*
9116  * Read an idle SMA message.  To be done in response to a notification from
9117  * the 8051.
9118  *
9119  * Returns 0 on success, -EINVAL on error
9120  */
9121 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9122 {
9123         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9124                                  data);
9125 }
9126
9127 /*
9128  * Send an idle LCB message.
9129  *
9130  * Returns 0 on success, -EINVAL on error
9131  */
9132 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9133 {
9134         int ret;
9135
9136         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9137         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9138         if (ret != HCMD_SUCCESS) {
9139                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9140                            data, ret);
9141                 return -EINVAL;
9142         }
9143         return 0;
9144 }
9145
9146 /*
9147  * Send an idle SMA message.
9148  *
9149  * Returns 0 on success, -EINVAL on error
9150  */
9151 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9152 {
9153         u64 data;
9154
9155         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9156                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9157         return send_idle_message(dd, data);
9158 }
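
/*
 * Example (sketch): sending an idle SMA message.  The payload constant
 * (SMA_IDLE_ACTIVE here) is assumed to be defined elsewhere in the
 * driver; only the low IDLE_PAYLOAD_MASK bits of the message are used.
 *
 *      if (send_idle_sma(dd, SMA_IDLE_ACTIVE))
 *              dd_dev_err(dd, "failed to send idle SMA message\n");
 */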
9159
9160 /*
9161  * Initialize the LCB then do a quick link up.  This may or may not be
9162  * in loopback.
9163  *
9164  * return 0 on success, -errno on error
9165  */
9166 static int do_quick_linkup(struct hfi1_devdata *dd)
9167 {
9168         int ret;
9169
9170         lcb_shutdown(dd, 0);
9171
9172         if (loopback) {
9173                 /* LCB_CFG_LOOPBACK.VAL = 2 */
9174                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9175                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9176                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9177                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9178         }
9179
9180         /* start the LCBs */
9181         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9182         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9183
9184         /* simulator only loopback steps */
9185         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9186                 /* LCB_CFG_RUN.EN = 1 */
9187                 write_csr(dd, DC_LCB_CFG_RUN,
9188                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9189
9190                 ret = wait_link_transfer_active(dd, 10);
9191                 if (ret)
9192                         return ret;
9193
9194                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9195                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9196         }
9197
9198         if (!loopback) {
9199                 /*
9200                  * When doing quick linkup and not in loopback, both
9201                  * sides must be done with LCB set-up before either
9202                  * starts the quick linkup.  Put a delay here so that
9203                  * both sides can be started and have a chance to be
9204                  * done with LCB set up before resuming.
9205                  */
9206                 dd_dev_err(dd,
9207                            "Pausing for peer to be finished with LCB set up\n");
9208                 msleep(5000);
9209                 dd_dev_err(dd, "Continuing with quick linkup\n");
9210         }
9211
9212         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9213         set_8051_lcb_access(dd);
9214
9215         /*
9216          * State "quick" LinkUp request sets the physical link state to
9217          * LinkUp without a verify capability sequence.
9218          * This state is in simulator v37 and later.
9219          */
9220         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9221         if (ret != HCMD_SUCCESS) {
9222                 dd_dev_err(dd,
9223                            "%s: set physical link state to quick LinkUp failed with return %d\n",
9224                            __func__, ret);
9225
9226                 set_host_lcb_access(dd);
9227                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9228
9229                 if (ret >= 0)
9230                         ret = -EINVAL;
9231                 return ret;
9232         }
9233
9234         return 0; /* success */
9235 }
9236
9237 /*
9238  * Do all special steps to set up loopback.
9239  */
9240 static int init_loopback(struct hfi1_devdata *dd)
9241 {
9242         dd_dev_info(dd, "Entering loopback mode\n");
9243
9244         /* all loopbacks should disable self GUID check */
9245         write_csr(dd, DC_DC8051_CFG_MODE,
9246                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9247
9248         /*
9249          * The simulator has only one loopback option - LCB.  Switch
9250          * to that option, which includes quick link up.
9251          *
9252          * Accept all valid loopback values.
9253          */
9254         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9255             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9256              loopback == LOOPBACK_CABLE)) {
9257                 loopback = LOOPBACK_LCB;
9258                 quick_linkup = 1;
9259                 return 0;
9260         }
9261
9262         /*
9263          * SerDes loopback init sequence is handled in set_local_link_attributes
9264          */
9265         if (loopback == LOOPBACK_SERDES)
9266                 return 0;
9267
9268         /* LCB loopback - handled at poll time */
9269         if (loopback == LOOPBACK_LCB) {
9270                 quick_linkup = 1; /* LCB is always quick linkup */
9271
9272                 /* not supported in emulation due to emulation RTL changes */
9273                 if (dd->icode == ICODE_FPGA_EMULATION) {
9274                         dd_dev_err(dd,
9275                                    "LCB loopback not supported in emulation\n");
9276                         return -EINVAL;
9277                 }
9278                 return 0;
9279         }
9280
9281         /* external cable loopback requires no extra steps */
9282         if (loopback == LOOPBACK_CABLE)
9283                 return 0;
9284
9285         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9286         return -EINVAL;
9287 }
9288
9289 /*
9290  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9291  * used in the Verify Capability link width attribute.
9292  */
9293 static u16 opa_to_vc_link_widths(u16 opa_widths)
9294 {
9295         int i;
9296         u16 result = 0;
9297
9298         static const struct link_bits {
9299                 u16 from;
9300                 u16 to;
9301         } opa_link_xlate[] = {
9302                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9303                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9304                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9305                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9306         };
9307
9308         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9309                 if (opa_widths & opa_link_xlate[i].from)
9310                         result |= opa_link_xlate[i].to;
9311         }
9312         return result;
9313 }
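
/*
 * Worked example: an enabled OPA width of
 * (OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X) translates to Verify
 * Capability bits (1 << 0) | (1 << 3) = 0x9.
 */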
9314
9315 /*
9316  * Set link attributes before moving to polling.
9317  */
9318 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9319 {
9320         struct hfi1_devdata *dd = ppd->dd;
9321         u8 enable_lane_tx;
9322         u8 tx_polarity_inversion;
9323         u8 rx_polarity_inversion;
9324         int ret;
9325         u32 misc_bits = 0;
9326         /* reset our fabric serdes to clear any lingering problems */
9327         fabric_serdes_reset(dd);
9328
9329         /* set the local tx rate - need to read-modify-write */
9330         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9331                                &rx_polarity_inversion, &ppd->local_tx_rate);
9332         if (ret)
9333                 goto set_local_link_attributes_fail;
9334
9335         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9336                 /* set the tx rate to the fastest enabled */
9337                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9338                         ppd->local_tx_rate = 1;
9339                 else
9340                         ppd->local_tx_rate = 0;
9341         } else {
9342                 /* set the tx rate to all enabled */
9343                 ppd->local_tx_rate = 0;
9344                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9345                         ppd->local_tx_rate |= 2;
9346                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9347                         ppd->local_tx_rate |= 1;
9348         }
9349
9350         enable_lane_tx = 0xF; /* enable all four lanes */
9351         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9352                                 rx_polarity_inversion, ppd->local_tx_rate);
9353         if (ret != HCMD_SUCCESS)
9354                 goto set_local_link_attributes_fail;
9355
9356         ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9357         if (ret != HCMD_SUCCESS) {
9358                 dd_dev_err(dd,
9359                            "Failed to set host interface version, return 0x%x\n",
9360                            ret);
9361                 goto set_local_link_attributes_fail;
9362         }
9363
9364         /*
9365          * DC supports continuous updates.
9366          */
9367         ret = write_vc_local_phy(dd,
9368                                  0 /* no power management */,
9369                                  1 /* continuous updates */);
9370         if (ret != HCMD_SUCCESS)
9371                 goto set_local_link_attributes_fail;
9372
9373         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9374         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9375                                     ppd->port_crc_mode_enabled);
9376         if (ret != HCMD_SUCCESS)
9377                 goto set_local_link_attributes_fail;
9378
9379         /*
9380          * SerDes loopback init sequence requires
9381          * setting bit 0 of MISC_CONFIG_BITS
9382          */
9383         if (loopback == LOOPBACK_SERDES)
9384                 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9385
9386         /*
9387          * An external device configuration request is used to reset the LCB
9388          * to retry obtaining operational lanes when the first attempt is
9389          * unsuccessful.
9390          */
9391         if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9392                 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9393
9394         ret = write_vc_local_link_mode(dd, misc_bits, 0,
9395                                        opa_to_vc_link_widths(
9396                                                 ppd->link_width_enabled));
9397         if (ret != HCMD_SUCCESS)
9398                 goto set_local_link_attributes_fail;
9399
9400         /* let peer know who we are */
9401         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9402         if (ret == HCMD_SUCCESS)
9403                 return 0;
9404
9405 set_local_link_attributes_fail:
9406         dd_dev_err(dd,
9407                    "Failed to set local link attributes, return 0x%x\n",
9408                    ret);
9409         return ret;
9410 }
9411
9412 /*
9413  * Call this to start the link.
9414  * Do not do anything if the link is disabled.
9415  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9416  */
9417 int start_link(struct hfi1_pportdata *ppd)
9418 {
9419         /*
9420          * Tune the SerDes to a ballpark setting for optimal signal and bit
9421          * error rate.  Needs to be done before starting the link.
9422          */
9423         tune_serdes(ppd);
9424
9425         if (!ppd->driver_link_ready) {
9426                 dd_dev_info(ppd->dd,
9427                             "%s: stopping link start because driver is not ready\n",
9428                             __func__);
9429                 return 0;
9430         }
9431
9432         /*
9433          * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9434          * pkey table can be configured properly if the HFI unit is connected
9435          * to a switch port with MgmtAllowed=NO.
9436          */
9437         clear_full_mgmt_pkey(ppd);
9438
9439         return set_link_state(ppd, HLS_DN_POLL);
9440 }
9441
9442 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9443 {
9444         struct hfi1_devdata *dd = ppd->dd;
9445         u64 mask;
9446         unsigned long timeout;
9447
9448         /*
9449          * Some QSFP cables have a quirk that asserts the IntN line as a side
9450          * effect of power up on plug-in.  Ignore this false-positive
9451          * interrupt until the module has finished powering up by waiting
9452          * at least the module inrush initialization time of 500 ms
9453          * (SFF 8679 Table 5-6), ensuring the voltage rails in the
9454          * module have stabilized.
9455          */
9456         msleep(500);
9457
9458         /*
9459          * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9460          */
9461         timeout = jiffies + msecs_to_jiffies(2000);
9462         while (1) {
9463                 mask = read_csr(dd, dd->hfi1_id ?
9464                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9465                 if (!(mask & QSFP_HFI0_INT_N))
9466                         break;
9467                 if (time_after(jiffies, timeout)) {
9468                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9469                                     __func__);
9470                         break;
9471                 }
9472                 udelay(2);
9473         }
9474 }
9475
9476 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9477 {
9478         struct hfi1_devdata *dd = ppd->dd;
9479         u64 mask;
9480
9481         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9482         if (enable) {
9483                 /*
9484                  * Clear the status register to avoid an immediate interrupt
9485                  * when we re-enable the IntN pin
9486                  */
9487                 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9488                           QSFP_HFI0_INT_N);
9489                 mask |= (u64)QSFP_HFI0_INT_N;
9490         } else {
9491                 mask &= ~(u64)QSFP_HFI0_INT_N;
9492         }
9493         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9494 }
9495
9496 int reset_qsfp(struct hfi1_pportdata *ppd)
9497 {
9498         struct hfi1_devdata *dd = ppd->dd;
9499         u64 mask, qsfp_mask;
9500
9501         /* Disable INT_N from triggering QSFP interrupts */
9502         set_qsfp_int_n(ppd, 0);
9503
9504         /* Reset the QSFP */
9505         mask = (u64)QSFP_HFI0_RESET_N;
9506
9507         qsfp_mask = read_csr(dd,
9508                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9509         qsfp_mask &= ~mask;
9510         write_csr(dd,
9511                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9512
9513         udelay(10);
9514
9515         qsfp_mask |= mask;
9516         write_csr(dd,
9517                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9518
9519         wait_for_qsfp_init(ppd);
9520
9521         /*
9522          * Allow INT_N to trigger the QSFP interrupt to watch
9523          * for alarms and warnings
9524          */
9525         set_qsfp_int_n(ppd, 1);
9526
9527         /*
9528          * After the reset, AOC transmitters are enabled by default. They need
9529          * to be turned off to complete the QSFP setup before they can be
9530          * enabled again.
9531          */
9532         return set_qsfp_tx(ppd, 0);
9533 }
9534
9535 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9536                                         u8 *qsfp_interrupt_status)
9537 {
9538         struct hfi1_devdata *dd = ppd->dd;
9539
9540         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9541             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9542                 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9543                            __func__);
9544
9545         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9546             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9547                 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9548                            __func__);
9549
9550         /*
9551          * The remaining alarms/warnings don't matter if the link is down.
9552          */
9553         if (ppd->host_link_state & HLS_DOWN)
9554                 return 0;
9555
9556         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9557             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9558                 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9559                            __func__);
9560
9561         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9562             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9563                 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9564                            __func__);
9565
9566         /* Byte 2 is vendor specific */
9567
9568         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9569             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9570                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9571                            __func__);
9572
9573         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9574             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9575                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9576                            __func__);
9577
9578         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9579             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9580                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9581                            __func__);
9582
9583         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9584             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9585                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9586                            __func__);
9587
9588         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9589             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9590                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9591                            __func__);
9592
9593         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9594             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9595                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9596                            __func__);
9597
9598         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9599             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9600                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9601                            __func__);
9602
9603         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9604             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9605                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9606                            __func__);
9607
9608         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9609             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9610                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9611                            __func__);
9612
9613         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9614             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9615                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9616                            __func__);
9617
9618         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9619             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9620                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9621                            __func__);
9622
9623         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9624             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9625                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9626                            __func__);
9627
9628         /* Bytes 9-10 and 11-12 are reserved */
9629         /* Bytes 13-15 are vendor specific */
9630
9631         return 0;
9632 }
9633
9634 /* This routine is only scheduled if the QSFP module-present signal is asserted */
9635 void qsfp_event(struct work_struct *work)
9636 {
9637         struct qsfp_data *qd;
9638         struct hfi1_pportdata *ppd;
9639         struct hfi1_devdata *dd;
9640
9641         qd = container_of(work, struct qsfp_data, qsfp_work);
9642         ppd = qd->ppd;
9643         dd = ppd->dd;
9644
9645         /* Sanity check */
9646         if (!qsfp_mod_present(ppd))
9647                 return;
9648
9649         if (ppd->host_link_state == HLS_DN_DISABLE) {
9650                 dd_dev_info(ppd->dd,
9651                             "%s: stopping link start because link is disabled\n",
9652                             __func__);
9653                 return;
9654         }
9655
9656         /*
9657          * Turn DC back on after cable has been re-inserted. Up until
9658          * now, the DC has been in reset to save power.
9659          */
9660         dc_start(dd);
9661
9662         if (qd->cache_refresh_required) {
9663                 set_qsfp_int_n(ppd, 0);
9664
9665                 wait_for_qsfp_init(ppd);
9666
9667                 /*
9668                  * Allow INT_N to trigger the QSFP interrupt to watch
9669                  * for alarms and warnings
9670                  */
9671                 set_qsfp_int_n(ppd, 1);
9672
9673                 start_link(ppd);
9674         }
9675
9676         if (qd->check_interrupt_flags) {
9677                 u8 qsfp_interrupt_status[16] = {0,};
9678
9679                 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9680                                   &qsfp_interrupt_status[0], 16) != 16) {
9681                         dd_dev_info(dd,
9682                                     "%s: Failed to read status of QSFP module\n",
9683                                     __func__);
9684                 } else {
9685                         unsigned long flags;
9686
9687                         handle_qsfp_error_conditions(
9688                                         ppd, qsfp_interrupt_status);
9689                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9690                         ppd->qsfp_info.check_interrupt_flags = 0;
9691                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9692                                                flags);
9693                 }
9694         }
9695 }
9696
9697 void init_qsfp_int(struct hfi1_devdata *dd)
9698 {
9699         struct hfi1_pportdata *ppd = dd->pport;
9700         u64 qsfp_mask;
9701
9702         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9703         /* Clear current status to avoid spurious interrupts */
9704         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9705                   qsfp_mask);
9706         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9707                   qsfp_mask);
9708
9709         set_qsfp_int_n(ppd, 0);
9710
9711         /* Handle active low nature of INT_N and MODPRST_N pins */
9712         if (qsfp_mod_present(ppd))
9713                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9714         write_csr(dd,
9715                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9716                   qsfp_mask);
9717
9718         /* Enable the appropriate QSFP IRQ source */
9719         if (!dd->hfi1_id)
9720                 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9721         else
9722                 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9723 }
9724
9725 /*
9726  * Do a one-time initialize of the LCB block.
9727  */
9728 static void init_lcb(struct hfi1_devdata *dd)
9729 {
9730         /* simulator does not correctly handle LCB cclk loopback, skip */
9731         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9732                 return;
9733
9734         /* the DC has been reset earlier in the driver load */
9735
9736         /* set LCB for cclk loopback on the port */
9737         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9738         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9739         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9740         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9741         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9742         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9743         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9744 }
9745
9746 /*
9747  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9748  * on error.
9749  */
9750 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9751 {
9752         int ret;
9753         u8 status;
9754
9755         /*
9756          * Report success if this is not a QSFP port, or if it is a QSFP port
9757          * but the cable is not present.
9758          */
9759         if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9760                 return 0;
9761
9762         /* read byte 2, the status byte */
9763         ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9764         if (ret < 0)
9765                 return ret;
9766         if (ret != 1)
9767                 return -EIO;
9768
9769         return 0; /* success */
9770 }
9771
9772 /*
9773  * Values for QSFP retry.
9774  *
9775  * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9776  * arrived at from experience on a large cluster.
9777  */
9778 #define MAX_QSFP_RETRIES 20
9779 #define QSFP_RETRY_WAIT 500 /* msec */
9780
9781 /*
9782  * Try a QSFP read.  If it fails, schedule a retry for later.
9783  * Called on first link activation after driver load.
9784  */
9785 static void try_start_link(struct hfi1_pportdata *ppd)
9786 {
9787         if (test_qsfp_read(ppd)) {
9788                 /* read failed */
9789                 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9790                         dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9791                         return;
9792                 }
9793                 dd_dev_info(ppd->dd,
9794                             "QSFP not responding, waiting and retrying %d\n",
9795                             (int)ppd->qsfp_retry_count);
9796                 ppd->qsfp_retry_count++;
9797                 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9798                                    msecs_to_jiffies(QSFP_RETRY_WAIT));
9799                 return;
9800         }
9801         ppd->qsfp_retry_count = 0;
9802
9803         start_link(ppd);
9804 }
9805
9806 /*
9807  * Workqueue function to start the link after a delay.
9808  */
9809 void handle_start_link(struct work_struct *work)
9810 {
9811         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9812                                                   start_link_work.work);
9813         try_start_link(ppd);
9814 }
9815
9816 int bringup_serdes(struct hfi1_pportdata *ppd)
9817 {
9818         struct hfi1_devdata *dd = ppd->dd;
9819         u64 guid;
9820         int ret;
9821
9822         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9823                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9824
9825         guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9826         if (!guid) {
9827                 if (dd->base_guid)
9828                         guid = dd->base_guid + ppd->port - 1;
9829                 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9830         }
9831
9832         /* Set linkinit_reason on power up per OPA spec */
9833         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9834
9835         /* one-time init of the LCB */
9836         init_lcb(dd);
9837
9838         if (loopback) {
9839                 ret = init_loopback(dd);
9840                 if (ret < 0)
9841                         return ret;
9842         }
9843
9844         get_port_type(ppd);
9845         if (ppd->port_type == PORT_TYPE_QSFP) {
9846                 set_qsfp_int_n(ppd, 0);
9847                 wait_for_qsfp_init(ppd);
9848                 set_qsfp_int_n(ppd, 1);
9849         }
9850
9851         try_start_link(ppd);
9852         return 0;
9853 }
9854
9855 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9856 {
9857         struct hfi1_devdata *dd = ppd->dd;
9858
9859         /*
9860          * Shut down the link and keep it down.  First clear the flag that
9861          * indicates the driver wants to allow the link to be up
9862          * (driver_link_ready).  Then make sure the link is not automatically
9863          * restarted (link_enabled).  Cancel any pending restart.  Finally,
9864          * go offline.
9865          */
9866         ppd->driver_link_ready = 0;
9867         ppd->link_enabled = 0;
9868
9869         ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9870         flush_delayed_work(&ppd->start_link_work);
9871         cancel_delayed_work_sync(&ppd->start_link_work);
9872
9873         ppd->offline_disabled_reason =
9874                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9875         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9876                              OPA_LINKDOWN_REASON_REBOOT);
9877         set_link_state(ppd, HLS_DN_OFFLINE);
9878
9879         /* disable the port */
9880         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9881         cancel_work_sync(&ppd->freeze_work);
9882 }
9883
9884 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9885 {
9886         struct hfi1_pportdata *ppd;
9887         int i;
9888
9889         ppd = (struct hfi1_pportdata *)(dd + 1);
9890         for (i = 0; i < dd->num_pports; i++, ppd++) {
9891                 ppd->ibport_data.rvp.rc_acks = NULL;
9892                 ppd->ibport_data.rvp.rc_qacks = NULL;
9893                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9894                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9895                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9896                 if (!ppd->ibport_data.rvp.rc_acks ||
9897                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9898                     !ppd->ibport_data.rvp.rc_qacks)
9899                         return -ENOMEM;
9900         }
9901
9902         return 0;
9903 }
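
/*
 * The rvp counters above are allocated per-CPU so updates on the RC ACK
 * paths avoid contention on a shared cache line.  A reader sums the
 * per-CPU values; a minimal sketch (assuming a u64 __percpu pointer such
 * as rc_acks):
 *
 *      u64 sum = 0;
 *      int cpu;
 *
 *      for_each_possible_cpu(cpu)
 *              sum += *per_cpu_ptr(ppd->ibport_data.rvp.rc_acks, cpu);
 */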
9904
9905 /*
9906  * index is the index into the receive array
9907  */
9908 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9909                   u32 type, unsigned long pa, u16 order)
9910 {
9911         u64 reg;
9912
9913         if (!(dd->flags & HFI1_PRESENT))
9914                 goto done;
9915
9916         if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9917                 pa = 0;
9918                 order = 0;
9919         } else if (type > PT_INVALID) {
9920                 dd_dev_err(dd,
9921                            "unexpected receive array type %u for index %u, not handled\n",
9922                            type, index);
9923                 goto done;
9924         }
9925         trace_hfi1_put_tid(dd, index, type, pa, order);
9926
9927 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
9928         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9929                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9930                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9931                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9932         trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9933         writeq(reg, dd->rcvarray_wc + (index * 8));
9934
9935         if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9936                 /*
9937                  * Eager entries are written and flushed
9938                  *
9939                  * Expected entries are flushed every 4 writes
9940                  */
9941                 flush_wc();
9942 done:
9943         return;
9944 }
9945
9946 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9947 {
9948         struct hfi1_devdata *dd = rcd->dd;
9949         u32 i;
9950
9951         /* this could be optimized */
9952         for (i = rcd->eager_base; i < rcd->eager_base +
9953                      rcd->egrbufs.alloced; i++)
9954                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9955
9956         for (i = rcd->expected_base;
9957                         i < rcd->expected_base + rcd->expected_count; i++)
9958                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9959 }
9960
9961 static const char * const ib_cfg_name_strings[] = {
9962         "HFI1_IB_CFG_LIDLMC",
9963         "HFI1_IB_CFG_LWID_DG_ENB",
9964         "HFI1_IB_CFG_LWID_ENB",
9965         "HFI1_IB_CFG_LWID",
9966         "HFI1_IB_CFG_SPD_ENB",
9967         "HFI1_IB_CFG_SPD",
9968         "HFI1_IB_CFG_RXPOL_ENB",
9969         "HFI1_IB_CFG_LREV_ENB",
9970         "HFI1_IB_CFG_LINKLATENCY",
9971         "HFI1_IB_CFG_HRTBT",
9972         "HFI1_IB_CFG_OP_VLS",
9973         "HFI1_IB_CFG_VL_HIGH_CAP",
9974         "HFI1_IB_CFG_VL_LOW_CAP",
9975         "HFI1_IB_CFG_OVERRUN_THRESH",
9976         "HFI1_IB_CFG_PHYERR_THRESH",
9977         "HFI1_IB_CFG_LINKDEFAULT",
9978         "HFI1_IB_CFG_PKEYS",
9979         "HFI1_IB_CFG_MTU",
9980         "HFI1_IB_CFG_LSTATE",
9981         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9982         "HFI1_IB_CFG_PMA_TICKS",
9983         "HFI1_IB_CFG_PORT"
9984 };
9985
9986 static const char *ib_cfg_name(int which)
9987 {
9988         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9989                 return "invalid";
9990         return ib_cfg_name_strings[which];
9991 }
9992
9993 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9994 {
9995         struct hfi1_devdata *dd = ppd->dd;
9996         int val = 0;
9997
9998         switch (which) {
9999         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
10000                 val = ppd->link_width_enabled;
10001                 break;
10002         case HFI1_IB_CFG_LWID: /* currently active Link-width */
10003                 val = ppd->link_width_active;
10004                 break;
10005         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10006                 val = ppd->link_speed_enabled;
10007                 break;
10008         case HFI1_IB_CFG_SPD: /* current Link speed */
10009                 val = ppd->link_speed_active;
10010                 break;
10011
10012         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
10013         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
10014         case HFI1_IB_CFG_LINKLATENCY:
10015                 goto unimplemented;
10016
10017         case HFI1_IB_CFG_OP_VLS:
10018                 val = ppd->actual_vls_operational;
10019                 break;
10020         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
10021                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
10022                 break;
10023         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
10024                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
10025                 break;
10026         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10027                 val = ppd->overrun_threshold;
10028                 break;
10029         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10030                 val = ppd->phy_error_threshold;
10031                 break;
10032         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10033                 val = HLS_DEFAULT;
10034                 break;
10035
10036         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10037         case HFI1_IB_CFG_PMA_TICKS:
10038         default:
10039 unimplemented:
10040                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10041                         dd_dev_info(
10042                                 dd,
10043                                 "%s: which %s: not implemented\n",
10044                                 __func__,
10045                                 ib_cfg_name(which));
10046                 break;
10047         }
10048
10049         return val;
10050 }
10051
10052 /*
10053  * The largest MAD packet size.
10054  */
10055 #define MAX_MAD_PACKET 2048
10056
10057 /*
10058  * Return the maximum header bytes that can go on the _wire_
10059  * for this device. This count includes the ICRC which is
10060  * not part of the packet held in memory but is appended
10061  * by the HW.
10062  * This is dependent on the device's receive header entry size.
10063  * HFI allows this to be set per-receive context, but the
10064  * driver presently enforces a global value.
10065  */
10066 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10067 {
10068         /*
10069          * The maximum non-payload (MTU) bytes in LRH.PktLen are
10070          * the Receive Header Entry Size minus the PBC (or RHF) size
10071          * plus one DW for the ICRC appended by HW.
10072          *
10073          * dd->rcd[0].rcvhdrqentsize is in DW.
10074          * We use rcd[0] as all contexts will have the same value. Also,
10075          * the first kernel context would have been allocated by now so
10076          * we are guaranteed a valid value.
10077          */
10078         return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10079 }
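
/*
 * Worked example: assuming a receive header entry size of 32 DWs, the
 * maximum wire header would be (32 - 2 + 1) << 2 = 124 bytes.
 */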
10080
10081 /*
10082  * Set Send Length
10083  * @ppd - per port data
10084  *
10085  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10086  * registers compare against LRH.PktLen, so use the max bytes included
10087  * in the LRH.
10088  *
10089  * This routine changes all VL values except VL15, which it maintains at
10090  * the same value.
10091  */
10092 static void set_send_length(struct hfi1_pportdata *ppd)
10093 {
10094         struct hfi1_devdata *dd = ppd->dd;
10095         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10096         u32 maxvlmtu = dd->vld[15].mtu;
10097         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10098                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10099                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10100         int i, j;
10101         u32 thres;
10102
10103         for (i = 0; i < ppd->vls_supported; i++) {
10104                 if (dd->vld[i].mtu > maxvlmtu)
10105                         maxvlmtu = dd->vld[i].mtu;
10106                 if (i <= 3)
10107                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10108                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10109                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10110                 else
10111                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10112                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10113                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10114         }
10115         write_csr(dd, SEND_LEN_CHECK0, len1);
10116         write_csr(dd, SEND_LEN_CHECK1, len2);
10117         /* adjust kernel credit return thresholds based on new MTUs */
10118         /* all kernel receive contexts have the same hdrqentsize */
10119         for (i = 0; i < ppd->vls_supported; i++) {
10120                 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10121                             sc_mtu_to_threshold(dd->vld[i].sc,
10122                                                 dd->vld[i].mtu,
10123                                                 get_hdrqentsize(dd->rcd[0])));
10124                 for (j = 0; j < INIT_SC_PER_VL; j++)
10125                         sc_set_cr_threshold(
10126                                         pio_select_send_context_vl(dd, j, i),
10127                                             thres);
10128         }
10129         thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10130                     sc_mtu_to_threshold(dd->vld[15].sc,
10131                                         dd->vld[15].mtu,
10132                                         dd->rcd[0]->rcvhdrqentsize));
10133         sc_set_cr_threshold(dd->vld[15].sc, thres);
10134
10135         /* Adjust maximum MTU for the port in DC */
10136         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10137                 (ilog2(maxvlmtu >> 8) + 1);
10138         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10139         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10140         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10141                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10142         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10143 }
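
/*
 * Worked example of the length-check math above: for a 10240-byte MTU
 * and, say, 124 bytes of maximum LRH header, the programmed limit is
 * (10240 + 124) >> 2 = 2591 DWs, the value LRH.PktLen is compared
 * against.
 */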
10144
10145 static void set_lidlmc(struct hfi1_pportdata *ppd)
10146 {
10147         int i;
10148         u64 sreg = 0;
10149         struct hfi1_devdata *dd = ppd->dd;
10150         u32 mask = ~((1U << ppd->lmc) - 1);
10151         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10152         u32 lid;
10153
10154         /*
10155          * Program 0 into the CSR if the port LID is extended. This prevents
10156          * 9B packets from being sent out for large LIDs.
10157          */
10158         lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10159         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10160                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10161         c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10162                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10163               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10164                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10165         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10166
10167         /*
10168          * Iterate over all the send contexts and set their SLID check
10169          */
10170         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10171                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10172                (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10173                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10174
10175         for (i = 0; i < chip_send_contexts(dd); i++) {
10176                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10177                           i, (u32)sreg);
10178                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10179         }
10180
10181         /* Now we have to do the same thing for the sdma engines */
10182         sdma_update_lmc(dd, mask, lid);
10183 }
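
/*
 * Worked example: with lmc = 2 the mask is ~((1 << 2) - 1) = 0xfffffffc,
 * so the send context SLID check accepts the base LID through
 * base LID + 3, and the DCC DLID mask is programmed the same way.
 */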
10184
10185 static const char *state_completed_string(u32 completed)
10186 {
10187         static const char * const state_completed[] = {
10188                 "EstablishComm",
10189                 "OptimizeEQ",
10190                 "VerifyCap"
10191         };
10192
10193         if (completed < ARRAY_SIZE(state_completed))
10194                 return state_completed[completed];
10195
10196         return "unknown";
10197 }
10198
10199 static const char all_lanes_dead_timeout_expired[] =
10200         "All lanes were inactive â€“ was the interconnect media removed?";
10201 static const char tx_out_of_policy[] =
10202         "Passing lanes on local port do not meet the local link width policy";
10203 static const char no_state_complete[] =
10204         "State timeout occurred before link partner completed the state";
10205 static const char * const state_complete_reasons[] = {
10206         [0x00] = "Reason unknown",
10207         [0x01] = "Link was halted by driver, refer to LinkDownReason",
10208         [0x02] = "Link partner reported failure",
10209         [0x10] = "Unable to achieve frame sync on any lane",
10210         [0x11] =
10211           "Unable to find a common bit rate with the link partner",
10212         [0x12] =
10213           "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10214         [0x13] =
10215           "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10216         [0x14] = no_state_complete,
10217         [0x15] =
10218           "State timeout occurred before link partner identified equalization presets",
10219         [0x16] =
10220           "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10221         [0x17] = tx_out_of_policy,
10222         [0x20] = all_lanes_dead_timeout_expired,
10223         [0x21] =
10224           "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10225         [0x22] = no_state_complete,
10226         [0x23] =
10227           "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10228         [0x24] = tx_out_of_policy,
10229         [0x30] = all_lanes_dead_timeout_expired,
10230         [0x31] =
10231           "State timeout occurred waiting for host to process received frames",
10232         [0x32] = no_state_complete,
10233         [0x33] =
10234           "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10235         [0x34] = tx_out_of_policy,
10236         [0x35] = "Negotiated link width is mutually exclusive",
10237         [0x36] =
10238           "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10239         [0x37] = "Unable to resolve secure data exchange",
10240 };
10241
10242 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10243                                                      u32 code)
10244 {
10245         const char *str = NULL;
10246
10247         if (code < ARRAY_SIZE(state_complete_reasons))
10248                 str = state_complete_reasons[code];
10249
10250         if (str)
10251                 return str;
10252         return "Reserved";
10253 }
10254
10255 /* describe the given last state complete frame */
10256 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10257                                   const char *prefix)
10258 {
10259         struct hfi1_devdata *dd = ppd->dd;
10260         u32 success;
10261         u32 state;
10262         u32 reason;
10263         u32 lanes;
10264
10265         /*
10266          * Decode frame:
10267          *  [ 0: 0] - success
10268          *  [ 3: 1] - state
10269          *  [ 7: 4] - next state timeout
10270          *  [15: 8] - reason code
10271          *  [31:16] - lanes
10272          */
10273         success = frame & 0x1;
10274         state = (frame >> 1) & 0x7;
10275         reason = (frame >> 8) & 0xff;
10276         lanes = (frame >> 16) & 0xffff;
10277
10278         dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10279                    prefix, frame);
10280         dd_dev_err(dd, "    last reported state state: %s (0x%x)\n",
10281                    state_completed_string(state), state);
10282         dd_dev_err(dd, "    state successfully completed: %s\n",
10283                    success ? "yes" : "no");
10284         dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10285                    reason, state_complete_reason_code_string(ppd, reason));
10286         dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10287 }
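
/*
 * Worked example: a frame of 0x000f1400 decodes as success = 0,
 * state = 0 (EstablishComm), reason = 0x14 ("State timeout occurred
 * before link partner completed the state"), passing lane mask = 0x000f.
 */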
10288
10289 /*
10290  * Read the last state complete frames and explain them.  This routine
10291  * expects to be called if the link went down during link negotiation
10292  * and initialization (LNI).  That is, anywhere between polling and link up.
10293  */
10294 static void check_lni_states(struct hfi1_pportdata *ppd)
10295 {
10296         u32 last_local_state;
10297         u32 last_remote_state;
10298
10299         read_last_local_state(ppd->dd, &last_local_state);
10300         read_last_remote_state(ppd->dd, &last_remote_state);
10301
10302         /*
10303          * Don't report anything if there is nothing to report.  A value of
10304          * 0 means the link was taken down while polling and there was no
10305          * training in-process.
10306          */
10307         if (last_local_state == 0 && last_remote_state == 0)
10308                 return;
10309
10310         decode_state_complete(ppd, last_local_state, "transmitted");
10311         decode_state_complete(ppd, last_remote_state, "received");
10312 }
10313
10314 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10315 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10316 {
10317         u64 reg;
10318         unsigned long timeout;
10319
10320         /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10321         timeout = jiffies + msecs_to_jiffies(wait_ms);
10322         while (1) {
10323                 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10324                 if (reg)
10325                         break;
10326                 if (time_after(jiffies, timeout)) {
10327                         dd_dev_err(dd,
10328                                    "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10329                         return -ETIMEDOUT;
10330                 }
10331                 udelay(2);
10332         }
10333         return 0;
10334 }
10335
10336 /* called when the logical link state is not down as it should be */
10337 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10338 {
10339         struct hfi1_devdata *dd = ppd->dd;
10340
10341         /*
10342          * Bring link up in LCB loopback
10343          */
10344         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10345         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10346                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10347
10348         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10349         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10350         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10351         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10352
10353         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10354         (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10355         udelay(3);
10356         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10357         write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10358
10359         wait_link_transfer_active(dd, 100);
10360
10361         /*
10362          * Bring the link down again.
10363          */
10364         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10365         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10366         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10367
10368         dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10369 }
10370
10371 /*
10372  * Helper for set_link_state().  Do not call except from that routine.
10373  * Expects ppd->hls_mutex to be held.
10374  *
10375  * @rem_reason value to be sent to the neighbor
10376  *
10377  * LinkDownReasons only set if transition succeeds.
10378  */
10379 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10380 {
10381         struct hfi1_devdata *dd = ppd->dd;
10382         u32 previous_state;
10383         int offline_state_ret;
10384         int ret;
10385
10386         update_lcb_cache(dd);
10387
10388         previous_state = ppd->host_link_state;
10389         ppd->host_link_state = HLS_GOING_OFFLINE;
10390
10391         /* start offline transition */
10392         ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10393
10394         if (ret != HCMD_SUCCESS) {
10395                 dd_dev_err(dd,
10396                            "Failed to transition to Offline link state, return %d\n",
10397                            ret);
10398                 return -EINVAL;
10399         }
10400         if (ppd->offline_disabled_reason ==
10401                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10402                 ppd->offline_disabled_reason =
10403                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10404
10405         offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10406         if (offline_state_ret < 0)
10407                 return offline_state_ret;
10408
10409         /* Disabling AOC transmitters */
10410         if (ppd->port_type == PORT_TYPE_QSFP &&
10411             ppd->qsfp_info.limiting_active &&
10412             qsfp_mod_present(ppd)) {
10413                 int ret;
10414
10415                 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10416                 if (ret == 0) {
10417                         set_qsfp_tx(ppd, 0);
10418                         release_chip_resource(dd, qsfp_resource(dd));
10419                 } else {
10420                         /* not fatal, but should warn */
10421                         dd_dev_err(dd,
10422                                    "Unable to acquire lock to turn off QSFP TX\n");
10423                 }
10424         }
10425
10426         /*
10427          * Wait for the offline.Quiet transition if it hasn't happened yet. It
10428          * can take a while for the link to go down.
10429          */
10430         if (offline_state_ret != PLS_OFFLINE_QUIET) {
10431                 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10432                 if (ret < 0)
10433                         return ret;
10434         }
10435
10436         /*
10437          * Now in charge of LCB - must be after the physical state is
10438          * offline.quiet and before host_link_state is changed.
10439          */
10440         set_host_lcb_access(dd);
10441         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10442
10443         /* make sure the logical state is also down */
10444         ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10445         if (ret)
10446                 force_logical_link_state_down(ppd);
10447
10448         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10449         update_statusp(ppd, IB_PORT_DOWN);
10450
10451         /*
10452          * The LNI has a mandatory wait time after the physical state
10453          * moves to Offline.Quiet.  The wait time may be different
10454          * depending on how the link went down.  The 8051 firmware
10455          * will observe the needed wait time and only move to ready
10456          * when that is completed.  The largest of the quiet timeouts
10457          * is 6s, so wait that long and then at least 0.5s more for
10458          * other transitions, and another 0.5s for a buffer.
10459          */
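        /* 6000 ms quiet + 500 ms transitions + 500 ms buffer = 7000 ms */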
10460         ret = wait_fm_ready(dd, 7000);
10461         if (ret) {
10462                 dd_dev_err(dd,
10463                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10464                 /* state is really offline, so make it so */
10465                 ppd->host_link_state = HLS_DN_OFFLINE;
10466                 return ret;
10467         }
10468
10469         /*
10470          * The state is now offline and the 8051 is ready to accept host
10471          * requests.
10472          *      - change our state
10473          *      - notify others if we were previously in a linkup state
10474          */
10475         ppd->host_link_state = HLS_DN_OFFLINE;
10476         if (previous_state & HLS_UP) {
10477                 /* went down while link was up */
10478                 handle_linkup_change(dd, 0);
10479         } else if (previous_state
10480                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10481                 /* went down while attempting link up */
10482                 check_lni_states(ppd);
10483
10484                 /* The QSFP doesn't need to be reset on LNI failure */
10485                 ppd->qsfp_info.reset_needed = 0;
10486         }
10487
10488         /* the active link width (downgrade) is 0 on link down */
10489         ppd->link_width_active = 0;
10490         ppd->link_width_downgrade_tx_active = 0;
10491         ppd->link_width_downgrade_rx_active = 0;
10492         ppd->current_egress_rate = 0;
10493         return 0;
10494 }
10495
10496 /* return the link state name */
10497 static const char *link_state_name(u32 state)
10498 {
10499         const char *name;
10500         int n = ilog2(state);
10501         static const char * const names[] = {
10502                 [__HLS_UP_INIT_BP]       = "INIT",
10503                 [__HLS_UP_ARMED_BP]      = "ARMED",
10504                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10505                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10506                 [__HLS_DN_POLL_BP]       = "POLL",
10507                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
10508                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10509                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10510                 [__HLS_GOING_UP_BP]      = "GOING_UP",
10511                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10512                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10513         };
10514
10515         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10516         return name ? name : "unknown";
10517 }
10518
10519 /* return the link state reason name */
10520 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10521 {
10522         if (state == HLS_UP_INIT) {
10523                 switch (ppd->linkinit_reason) {
10524                 case OPA_LINKINIT_REASON_LINKUP:
10525                         return "(LINKUP)";
10526                 case OPA_LINKINIT_REASON_FLAPPING:
10527                         return "(FLAPPING)";
10528                 case OPA_LINKINIT_OUTSIDE_POLICY:
10529                         return "(OUTSIDE_POLICY)";
10530                 case OPA_LINKINIT_QUARANTINED:
10531                         return "(QUARANTINED)";
10532                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10533                         return "(INSUFIC_CAPABILITY)";
10534                 default:
10535                         break;
10536                 }
10537         }
10538         return "";
10539 }
10540
10541 /*
10542  * driver_pstate - convert the driver's notion of a port's
10543  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10544  * Return -1 (converted to a u32) to indicate error.
10545  */
10546 u32 driver_pstate(struct hfi1_pportdata *ppd)
10547 {
10548         switch (ppd->host_link_state) {
10549         case HLS_UP_INIT:
10550         case HLS_UP_ARMED:
10551         case HLS_UP_ACTIVE:
10552                 return IB_PORTPHYSSTATE_LINKUP;
10553         case HLS_DN_POLL:
10554                 return IB_PORTPHYSSTATE_POLLING;
10555         case HLS_DN_DISABLE:
10556                 return IB_PORTPHYSSTATE_DISABLED;
10557         case HLS_DN_OFFLINE:
10558                 return OPA_PORTPHYSSTATE_OFFLINE;
10559         case HLS_VERIFY_CAP:
10560                 return IB_PORTPHYSSTATE_TRAINING;
10561         case HLS_GOING_UP:
10562                 return IB_PORTPHYSSTATE_TRAINING;
10563         case HLS_GOING_OFFLINE:
10564                 return OPA_PORTPHYSSTATE_OFFLINE;
10565         case HLS_LINK_COOLDOWN:
10566                 return OPA_PORTPHYSSTATE_OFFLINE;
10567         case HLS_DN_DOWNDEF:
10568         default:
10569                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10570                            ppd->host_link_state);
10571                 return -1;
10572         }
10573 }
10574
10575 /*
10576  * driver_lstate - convert the driver's notion of a port's
10577  * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10578  * (converted to a u32) to indicate error.
10579  */
10580 u32 driver_lstate(struct hfi1_pportdata *ppd)
10581 {
10582         if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10583                 return IB_PORT_DOWN;
10584
10585         switch (ppd->host_link_state & HLS_UP) {
10586         case HLS_UP_INIT:
10587                 return IB_PORT_INIT;
10588         case HLS_UP_ARMED:
10589                 return IB_PORT_ARMED;
10590         case HLS_UP_ACTIVE:
10591                 return IB_PORT_ACTIVE;
10592         default:
10593                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10594                            ppd->host_link_state);
10595                 return -1;
10596         }
10597 }
10598
10599 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10600                           u8 neigh_reason, u8 rem_reason)
10601 {
10602         if (ppd->local_link_down_reason.latest == 0 &&
10603             ppd->neigh_link_down_reason.latest == 0) {
10604                 ppd->local_link_down_reason.latest = lcl_reason;
10605                 ppd->neigh_link_down_reason.latest = neigh_reason;
10606                 ppd->remote_link_down_reason = rem_reason;
10607         }
10608 }
10609
10610 /**
10611  * data_vls_operational() - Verify if data VL BCT credits and MTU
10612  *                          are both set.
10613  * @ppd: pointer to hfi1_pportdata structure
10614  *
10615  * Return: true if the data VLs are operational, false otherwise.
10616  */
10617 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10618 {
10619         int i;
10620         u64 reg;
10621
10622         if (!ppd->actual_vls_operational)
10623                 return false;
10624
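        /* per-VL BCT credits and MTU must be set (or clear) together */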
10625         for (i = 0; i < ppd->vls_supported; i++) {
10626                 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10627                 if ((reg && !ppd->dd->vld[i].mtu) ||
10628                     (!reg && ppd->dd->vld[i].mtu))
10629                         return false;
10630         }
10631
10632         return true;
10633 }
10634
10635 /*
10636  * Change the physical and/or logical link state.
10637  *
10638  * Do not call this routine while inside an interrupt.  It contains
10639  * calls to routines that can take multiple seconds to finish.
10640  *
10641  * Returns 0 on success, -errno on failure.
10642  */
10643 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10644 {
10645         struct hfi1_devdata *dd = ppd->dd;
10646         struct ib_event event = {.device = NULL};
10647         int ret1, ret = 0;
10648         int orig_new_state, poll_bounce;
10649
10650         mutex_lock(&ppd->hls_lock);
10651
10652         orig_new_state = state;
10653         if (state == HLS_DN_DOWNDEF)
10654                 state = HLS_DEFAULT;
10655
10656         /* interpret poll -> poll as a link bounce */
10657         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10658                       state == HLS_DN_POLL;
10659
10660         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10661                     link_state_name(ppd->host_link_state),
10662                     link_state_name(orig_new_state),
10663                     poll_bounce ? "(bounce) " : "",
10664                     link_state_reason_name(ppd, state));
10665
10666         /*
10667          * If we're going to a (HLS_*) link state that implies the logical
10668          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10669          * reset is_sm_config_started to 0.
10670          */
10671         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10672                 ppd->is_sm_config_started = 0;
10673
10674         /*
10675          * Do nothing if the states match.  Let a poll to poll link bounce
10676          * go through.
10677          */
10678         if (ppd->host_link_state == state && !poll_bounce)
10679                 goto done;
10680
10681         switch (state) {
10682         case HLS_UP_INIT:
10683                 if (ppd->host_link_state == HLS_DN_POLL &&
10684                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10685                         /*
10686                          * Quick link up jumps from polling to here.
10687                          *
10688                          * Whether in normal or loopback mode, the
10689                          * simulator jumps from polling to link up.
10690                          * Accept that here.
10691                          */
10692                         /* OK */
10693                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10694                         goto unexpected;
10695                 }
10696
10697                 /*
10698                  * Wait for Link_Up physical state.
10699                  * Physical and Logical states should already be
10700                  * Physical and Logical states should already have
10701                  * transitioned to LinkUp and LinkInit, respectively.
10702                 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10703                 if (ret) {
10704                         dd_dev_err(dd,
10705                                    "%s: physical state did not change to LINK-UP\n",
10706                                    __func__);
10707                         break;
10708                 }
10709
10710                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10711                 if (ret) {
10712                         dd_dev_err(dd,
10713                                    "%s: logical state did not change to INIT\n",
10714                                    __func__);
10715                         break;
10716                 }
10717
10718                 /* clear old transient LINKINIT_REASON code */
10719                 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10720                         ppd->linkinit_reason =
10721                                 OPA_LINKINIT_REASON_LINKUP;
10722
10723                 /* enable the port */
10724                 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10725
10726                 handle_linkup_change(dd, 1);
10727                 pio_kernel_linkup(dd);
10728
10729                 /*
10730                  * After link up, a new link width will have been set.
10731                  * Update the xmit counters with regards to the new
10732                  * link width.
10733                  */
10734                 update_xmit_counters(ppd, ppd->link_width_active);
10735
10736                 ppd->host_link_state = HLS_UP_INIT;
10737                 update_statusp(ppd, IB_PORT_INIT);
10738                 break;
10739         case HLS_UP_ARMED:
10740                 if (ppd->host_link_state != HLS_UP_INIT)
10741                         goto unexpected;
10742
10743                 if (!data_vls_operational(ppd)) {
10744                         dd_dev_err(dd,
10745                                    "%s: Invalid data VL credits or mtu\n",
10746                                    __func__);
10747                         ret = -EINVAL;
10748                         break;
10749                 }
10750
10751                 set_logical_state(dd, LSTATE_ARMED);
10752                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10753                 if (ret) {
10754                         dd_dev_err(dd,
10755                                    "%s: logical state did not change to ARMED\n",
10756                                    __func__);
10757                         break;
10758                 }
10759                 ppd->host_link_state = HLS_UP_ARMED;
10760                 update_statusp(ppd, IB_PORT_ARMED);
10761                 /*
10762                  * The simulator does not currently implement SMA messages,
10763                  * so neighbor_normal is not set.  Set it here when we first
10764                  * move to Armed.
10765                  */
10766                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10767                         ppd->neighbor_normal = 1;
10768                 break;
10769         case HLS_UP_ACTIVE:
10770                 if (ppd->host_link_state != HLS_UP_ARMED)
10771                         goto unexpected;
10772
10773                 set_logical_state(dd, LSTATE_ACTIVE);
10774                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10775                 if (ret) {
10776                         dd_dev_err(dd,
10777                                    "%s: logical state did not change to ACTIVE\n",
10778                                    __func__);
10779                 } else {
10780                         /* tell all engines to go running */
10781                         sdma_all_running(dd);
10782                         ppd->host_link_state = HLS_UP_ACTIVE;
10783                         update_statusp(ppd, IB_PORT_ACTIVE);
10784
10785                         /* Signal the IB layer that the port has gone active */
10786                         event.device = &dd->verbs_dev.rdi.ibdev;
10787                         event.element.port_num = ppd->port;
10788                         event.event = IB_EVENT_PORT_ACTIVE;
10789                 }
10790                 break;
10791         case HLS_DN_POLL:
10792                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10793                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10794                     dd->dc_shutdown)
10795                         dc_start(dd);
10796                 /* Hand LED control to the DC */
10797                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10798
10799                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10800                         u8 tmp = ppd->link_enabled;
10801
10802                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10803                         if (ret) {
10804                                 ppd->link_enabled = tmp;
10805                                 break;
10806                         }
10807                         ppd->remote_link_down_reason = 0;
10808
10809                         if (ppd->driver_link_ready)
10810                                 ppd->link_enabled = 1;
10811                 }
10812
10813                 set_all_slowpath(ppd->dd);
10814                 ret = set_local_link_attributes(ppd);
10815                 if (ret)
10816                         break;
10817
10818                 ppd->port_error_action = 0;
10819
10820                 if (quick_linkup) {
10821                         /* quick linkup does not go into polling */
10822                         ret = do_quick_linkup(dd);
10823                 } else {
10824                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10825                         if (!ret1)
10826                                 ret1 = wait_phys_link_out_of_offline(ppd,
10827                                                                      3000);
10828                         if (ret1 != HCMD_SUCCESS) {
10829                                 dd_dev_err(dd,
10830                                            "Failed to transition to Polling link state, return 0x%x\n",
10831                                            ret1);
10832                                 ret = -EINVAL;
10833                         }
10834                 }
10835
10836                 /*
10837                  * Change the host link state after requesting DC8051 to
10838                  * change its physical state so that we can ignore any
10839                  * interrupt with stale LNI(XX) error, which will not be
10840                  * cleared until DC8051 transitions to Polling state.
10841                  */
10842                 ppd->host_link_state = HLS_DN_POLL;
10843                 ppd->offline_disabled_reason =
10844                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10845                 /*
10846                  * If an error occurred above, go back to offline.  The
10847                  * caller may reschedule another attempt.
10848                  */
10849                 if (ret)
10850                         goto_offline(ppd, 0);
10851                 else
10852                         log_physical_state(ppd, PLS_POLLING);
10853                 break;
10854         case HLS_DN_DISABLE:
10855                 /* link is disabled */
10856                 ppd->link_enabled = 0;
10857
10858                 /* allow any state to transition to disabled */
10859
10860                 /* must transition to offline first */
10861                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10862                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10863                         if (ret)
10864                                 break;
10865                         ppd->remote_link_down_reason = 0;
10866                 }
10867
10868                 if (!dd->dc_shutdown) {
10869                         ret1 = set_physical_link_state(dd, PLS_DISABLED);
10870                         if (ret1 != HCMD_SUCCESS) {
10871                                 dd_dev_err(dd,
10872                                            "Failed to transition to Disabled link state, return 0x%x\n",
10873                                            ret1);
10874                                 ret = -EINVAL;
10875                                 break;
10876                         }
10877                         ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10878                         if (ret) {
10879                                 dd_dev_err(dd,
10880                                            "%s: physical state did not change to DISABLED\n",
10881                                            __func__);
10882                                 break;
10883                         }
10884                         dc_shutdown(dd);
10885                 }
10886                 ppd->host_link_state = HLS_DN_DISABLE;
10887                 break;
10888         case HLS_DN_OFFLINE:
10889                 if (ppd->host_link_state == HLS_DN_DISABLE)
10890                         dc_start(dd);
10891
10892                 /* allow any state to transition to offline */
10893                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10894                 if (!ret)
10895                         ppd->remote_link_down_reason = 0;
10896                 break;
10897         case HLS_VERIFY_CAP:
10898                 if (ppd->host_link_state != HLS_DN_POLL)
10899                         goto unexpected;
10900                 ppd->host_link_state = HLS_VERIFY_CAP;
10901                 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10902                 break;
10903         case HLS_GOING_UP:
10904                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10905                         goto unexpected;
10906
10907                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10908                 if (ret1 != HCMD_SUCCESS) {
10909                         dd_dev_err(dd,
10910                                    "Failed to transition to link up state, return 0x%x\n",
10911                                    ret1);
10912                         ret = -EINVAL;
10913                         break;
10914                 }
10915                 ppd->host_link_state = HLS_GOING_UP;
10916                 break;
10917
10918         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10919         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10920         default:
10921                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10922                             __func__, state);
10923                 ret = -EINVAL;
10924                 break;
10925         }
10926
10927         goto done;
10928
10929 unexpected:
10930         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10931                    __func__, link_state_name(ppd->host_link_state),
10932                    link_state_name(state));
10933         ret = -EINVAL;
10934
10935 done:
10936         mutex_unlock(&ppd->hls_lock);
10937
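        /*
         * Dispatch outside of hls_lock; event.device is only set in the
         * HLS_UP_ACTIVE case above.
         */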
10938         if (event.device)
10939                 ib_dispatch_event(&event);
10940
10941         return ret;
10942 }
10943
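/*
 * Apply a single IB/OPA port configuration item (HFI1_IB_CFG_*).
 * Returns 0 on success, -EINVAL for unsupported values.
 */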
10944 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10945 {
10946         u64 reg;
10947         int ret = 0;
10948
10949         switch (which) {
10950         case HFI1_IB_CFG_LIDLMC:
10951                 set_lidlmc(ppd);
10952                 break;
10953         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10954                 /*
10955                  * The VL Arbitrator high limit is sent in units of 4k
10956                  * bytes, while HFI stores it in units of 64 bytes.
10957                  */
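                /* e.g. an incoming limit of 1 (4 KB) becomes 64 x 64-byte units */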
10958                 val *= 4096 / 64;
10959                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10960                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10961                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10962                 break;
10963         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10964                 /* HFI only supports POLL as the default link down state */
10965                 if (val != HLS_DN_POLL)
10966                         ret = -EINVAL;
10967                 break;
10968         case HFI1_IB_CFG_OP_VLS:
10969                 if (ppd->vls_operational != val) {
10970                         ppd->vls_operational = val;
10971                         if (!ppd->port)
10972                                 ret = -EINVAL;
10973                 }
10974                 break;
10975         /*
10976          * For link width, link width downgrade, and speed enable, always AND
10977          * the setting with what is actually supported.  This has two benefits.
10978          * First, enabled can't have unsupported values, no matter what the
10979          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10980          * "fill in with your supported value" have all the bits in the
10981          * field set, so simply ANDing with supported has the desired result.
10982          */
10983         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10984                 ppd->link_width_enabled = val & ppd->link_width_supported;
10985                 break;
10986         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10987                 ppd->link_width_downgrade_enabled =
10988                                 val & ppd->link_width_downgrade_supported;
10989                 break;
10990         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10991                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10992                 break;
10993         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10994                 /*
10995                  * HFI does not follow IB specs, save this value
10996                  * so we can report it, if asked.
10997                  */
10998                 ppd->overrun_threshold = val;
10999                 break;
11000         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
11001                 /*
11002                  * HFI does not follow IB specs, save this value
11003                  * so we can report it, if asked.
11004                  */
11005                 ppd->phy_error_threshold = val;
11006                 break;
11007
11008         case HFI1_IB_CFG_MTU:
11009                 set_send_length(ppd);
11010                 break;
11011
11012         case HFI1_IB_CFG_PKEYS:
11013                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
11014                         set_partition_keys(ppd);
11015                 break;
11016
11017         default:
11018                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11019                         dd_dev_info(ppd->dd,
11020                                     "%s: which %s, val 0x%x: not implemented\n",
11021                                     __func__, ib_cfg_name(which), val);
11022                 break;
11023         }
11024         return ret;
11025 }
11026
11027 /* begin functions related to vl arbitration table caching */
11028 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11029 {
11030         int i;
11031
11032         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11033                         VL_ARB_LOW_PRIO_TABLE_SIZE);
11034         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11035                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
11036
11037         /*
11038          * Note that we always return values directly from the
11039          * 'vl_arb_cache' (and do no CSR reads) in response to a
11040          * 'Get(VLArbTable)'. This is obviously correct after a
11041          * 'Set(VLArbTable)', since the cache will then be up to
11042          * date. But it's also correct prior to any 'Set(VLArbTable)'
11043          * since then both the cache and the relevant h/w registers
11044          * will be zeroed.
11045          */
11046
11047         for (i = 0; i < MAX_PRIO_TABLE; i++)
11048                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11049 }
11050
11051 /*
11052  * vl_arb_lock_cache
11053  *
11054  * All other vl_arb_* functions should be called only after locking
11055  * the cache.
11056  */
11057 static inline struct vl_arb_cache *
11058 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11059 {
11060         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11061                 return NULL;
11062         spin_lock(&ppd->vl_arb_cache[idx].lock);
11063         return &ppd->vl_arb_cache[idx];
11064 }
11065
11066 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11067 {
11068         spin_unlock(&ppd->vl_arb_cache[idx].lock);
11069 }
11070
11071 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11072                              struct ib_vl_weight_elem *vl)
11073 {
11074         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11075 }
11076
11077 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11078                              struct ib_vl_weight_elem *vl)
11079 {
11080         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11081 }
11082
11083 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11084                               struct ib_vl_weight_elem *vl)
11085 {
11086         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11087 }
11088
11089 /* end functions related to vl arbitration table caching */
11090
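/*
 * Write 'size' (vl, weight) entries to the arbitration table at CSR
 * 'target'.  If the link is up (and not A0 hardware), data VLs are
 * stopped and drained first so that a weight of 0 cannot strand a
 * packet, then reopened after the new weights are in place.
 */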
11091 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11092                           u32 size, struct ib_vl_weight_elem *vl)
11093 {
11094         struct hfi1_devdata *dd = ppd->dd;
11095         u64 reg;
11096         unsigned int i, is_up = 0;
11097         int drain, ret = 0;
11098
11099         mutex_lock(&ppd->hls_lock);
11100
11101         if (ppd->host_link_state & HLS_UP)
11102                 is_up = 1;
11103
11104         drain = !is_ax(dd) && is_up;
11105
11106         if (drain)
11107                 /*
11108                  * Before adjusting VL arbitration weights, empty per-VL
11109                  * FIFOs, otherwise a packet whose VL weight is being
11110                  * set to 0 could get stuck in a FIFO with no chance to
11111                  * egress.
11112                  */
11113                 ret = stop_drain_data_vls(dd);
11114
11115         if (ret) {
11116                 dd_dev_err(
11117                         dd,
11118                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11119                         __func__);
11120                 goto err;
11121         }
11122
11123         for (i = 0; i < size; i++, vl++) {
11124                 /*
11125                  * NOTE: The low priority shift and mask are used here, but
11126                  * they are the same for both the low and high registers.
11127                  */
11128                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11129                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11130                       | (((u64)vl->weight
11131                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11132                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11133                 write_csr(dd, target + (i * 8), reg);
11134         }
11135         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11136
11137         if (drain)
11138                 open_fill_data_vls(dd); /* reopen all VLs */
11139
11140 err:
11141         mutex_unlock(&ppd->hls_lock);
11142
11143         return ret;
11144 }
11145
11146 /*
11147  * Read one credit merge VL register.
11148  */
11149 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11150                            struct vl_limit *vll)
11151 {
11152         u64 reg = read_csr(dd, csr);
11153
11154         vll->dedicated = cpu_to_be16(
11155                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11156                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11157         vll->shared = cpu_to_be16(
11158                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11159                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11160 }
11161
11162 /*
11163  * Read the current credit merge limits.
11164  */
11165 static int get_buffer_control(struct hfi1_devdata *dd,
11166                               struct buffer_control *bc, u16 *overall_limit)
11167 {
11168         u64 reg;
11169         int i;
11170
11171         /* not all entries are filled in */
11172         memset(bc, 0, sizeof(*bc));
11173
11174         /* OPA and HFI have a 1-1 mapping */
11175         for (i = 0; i < TXE_NUM_DATA_VL; i++)
11176                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11177
11178         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11179         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11180
11181         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11182         bc->overall_shared_limit = cpu_to_be16(
11183                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11184                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11185         if (overall_limit)
11186                 *overall_limit = (reg
11187                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11188                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11189         return sizeof(struct buffer_control);
11190 }
11191
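/*
 * Read the 32-entry SC to VLnt map (4 bits per entry) from the two
 * DCC_CFG_SC_VL_TABLE CSRs into dp.  Returns the size of struct sc2vlnt.
 */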
11192 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11193 {
11194         u64 reg;
11195         int i;
11196
11197         /* each register contains 16 SC->VLnt mappings, 4 bits each */
11198         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11199         for (i = 0; i < sizeof(u64); i++) {
11200                 u8 byte = *(((u8 *)&reg) + i);
11201
11202                 dp->vlnt[2 * i] = byte & 0xf;
11203                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11204         }
11205
11206         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11207         for (i = 0; i < sizeof(u64); i++) {
11208                 u8 byte = *(((u8 *)&reg) + i);
11209
11210                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11211                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11212         }
11213         return sizeof(struct sc2vlnt);
11214 }
11215
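/* fill 'nelems' preemption table entries with VL 0xf and weight 0 */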
11216 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11217                               struct ib_vl_weight_elem *vl)
11218 {
11219         unsigned int i;
11220
11221         for (i = 0; i < nelems; i++, vl++) {
11222                 vl->vl = 0xf;
11223                 vl->weight = 0;
11224         }
11225 }
11226
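/*
 * Write the 32-entry SC to VLnt map in dp to the two
 * DCC_CFG_SC_VL_TABLE CSRs, 4 bits per entry.
 */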
11227 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11228 {
11229         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11230                   DC_SC_VL_VAL(15_0,
11231                                0, dp->vlnt[0] & 0xf,
11232                                1, dp->vlnt[1] & 0xf,
11233                                2, dp->vlnt[2] & 0xf,
11234                                3, dp->vlnt[3] & 0xf,
11235                                4, dp->vlnt[4] & 0xf,
11236                                5, dp->vlnt[5] & 0xf,
11237                                6, dp->vlnt[6] & 0xf,
11238                                7, dp->vlnt[7] & 0xf,
11239                                8, dp->vlnt[8] & 0xf,
11240                                9, dp->vlnt[9] & 0xf,
11241                                10, dp->vlnt[10] & 0xf,
11242                                11, dp->vlnt[11] & 0xf,
11243                                12, dp->vlnt[12] & 0xf,
11244                                13, dp->vlnt[13] & 0xf,
11245                                14, dp->vlnt[14] & 0xf,
11246                                15, dp->vlnt[15] & 0xf));
11247         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11248                   DC_SC_VL_VAL(31_16,
11249                                16, dp->vlnt[16] & 0xf,
11250                                17, dp->vlnt[17] & 0xf,
11251                                18, dp->vlnt[18] & 0xf,
11252                                19, dp->vlnt[19] & 0xf,
11253                                20, dp->vlnt[20] & 0xf,
11254                                21, dp->vlnt[21] & 0xf,
11255                                22, dp->vlnt[22] & 0xf,
11256                                23, dp->vlnt[23] & 0xf,
11257                                24, dp->vlnt[24] & 0xf,
11258                                25, dp->vlnt[25] & 0xf,
11259                                26, dp->vlnt[26] & 0xf,
11260                                27, dp->vlnt[27] & 0xf,
11261                                28, dp->vlnt[28] & 0xf,
11262                                29, dp->vlnt[29] & 0xf,
11263                                30, dp->vlnt[30] & 0xf,
11264                                31, dp->vlnt[31] & 0xf));
11265 }
11266
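/* warn that a nonzero limit on an unusable VL will be ignored */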
11267 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11268                         u16 limit)
11269 {
11270         if (limit != 0)
11271                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11272                             what, (int)limit, idx);
11273 }
11274
11275 /* change only the shared limit portion of SendCmGlobalCredit */
11276 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11277 {
11278         u64 reg;
11279
11280         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11281         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11282         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11283         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11284 }
11285
11286 /* change only the total credit limit portion of SendCmGlobalCredit */
11287 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11288 {
11289         u64 reg;
11290
11291         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11292         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11293         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11294         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11295 }
11296
11297 /* set the given per-VL shared limit */
11298 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11299 {
11300         u64 reg;
11301         u32 addr;
11302
11303         if (vl < TXE_NUM_DATA_VL)
11304                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11305         else
11306                 addr = SEND_CM_CREDIT_VL15;
11307
11308         reg = read_csr(dd, addr);
11309         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11310         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11311         write_csr(dd, addr, reg);
11312 }
11313
11314 /* set the given per-VL dedicated limit */
11315 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11316 {
11317         u64 reg;
11318         u32 addr;
11319
11320         if (vl < TXE_NUM_DATA_VL)
11321                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11322         else
11323                 addr = SEND_CM_CREDIT_VL15;
11324
11325         reg = read_csr(dd, addr);
11326         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11327         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11328         write_csr(dd, addr, reg);
11329 }
11330
11331 /* spin until the given per-VL status mask bits clear */
11332 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11333                                      const char *which)
11334 {
11335         unsigned long timeout;
11336         u64 reg;
11337
11338         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11339         while (1) {
11340                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11341
11342                 if (reg == 0)
11343                         return; /* success */
11344                 if (time_after(jiffies, timeout))
11345                         break;          /* timed out */
11346                 udelay(1);
11347         }
11348
11349         dd_dev_err(dd,
11350                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11351                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11352         /*
11353          * If this occurs, it is likely there was a credit loss on the link.
11354          * The only recovery from that is a link bounce.
11355          */
11356         dd_dev_err(dd,
11357                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11358 }
11359
11360 /*
11361  * The number of credits on the VLs may be changed while everything
11362  * is "live", but the following algorithm must be followed due to
11363  * how the hardware is actually implemented.  In particular,
11364  * Return_Credit_Status[] is the only correct status check.
11365  *
11366  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11367  *     set Global_Shared_Credit_Limit = 0
11368  *     use_all_vl = 1
11369  * mask0 = all VLs that are changing either dedicated or shared limits
11370  * set Shared_Limit[mask0] = 0
11371  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11372  * if (changing any dedicated limit)
11373  *     mask1 = all VLs that are lowering dedicated limits
11374  *     lower Dedicated_Limit[mask1]
11375  *     spin until Return_Credit_Status[mask1] == 0
11376  *     raise Dedicated_Limits
11377  * raise Shared_Limits
11378  * raise Global_Shared_Credit_Limit
11379  *
11380  * lower = if the new limit is lower, set the limit to the new value
11381  * raise = if the new limit is higher than the current value (may be changed
11382  * raise = if the new limit is higher than the current value (which may have
11383  *      been changed earlier in the algorithm), set the limit to the new value
11384 int set_buffer_control(struct hfi1_pportdata *ppd,
11385                        struct buffer_control *new_bc)
11386 {
11387         struct hfi1_devdata *dd = ppd->dd;
11388         u64 changing_mask, ld_mask, stat_mask;
11389         int change_count;
11390         int i, use_all_mask;
11391         int this_shared_changing;
11392         int vl_count = 0, ret;
11393         /*
11394          * A0: add the variable any_shared_limit_changing below and in the
11395          * algorithm above.  If removing A0 support, it can be removed.
11396          */
11397         int any_shared_limit_changing;
11398         struct buffer_control cur_bc;
11399         u8 changing[OPA_MAX_VLS];
11400         u8 lowering_dedicated[OPA_MAX_VLS];
11401         u16 cur_total;
11402         u32 new_total = 0;
11403         const u64 all_mask =
11404         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11405          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11406          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11407          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11408          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11409          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11410          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11411          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11412          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11413
11414 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11415 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
11416
11417         /* find the new total credits, do sanity check on unused VLs */
11418         for (i = 0; i < OPA_MAX_VLS; i++) {
11419                 if (valid_vl(i)) {
11420                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11421                         continue;
11422                 }
11423                 nonzero_msg(dd, i, "dedicated",
11424                             be16_to_cpu(new_bc->vl[i].dedicated));
11425                 nonzero_msg(dd, i, "shared",
11426                             be16_to_cpu(new_bc->vl[i].shared));
11427                 new_bc->vl[i].dedicated = 0;
11428                 new_bc->vl[i].shared = 0;
11429         }
11430         new_total += be16_to_cpu(new_bc->overall_shared_limit);
11431
11432         /* fetch the current values */
11433         get_buffer_control(dd, &cur_bc, &cur_total);
11434
11435         /*
11436          * Create the masks we will use.
11437          */
11438         memset(changing, 0, sizeof(changing));
11439         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11440         /*
11441          * NOTE: Assumes that the individual VL bits are adjacent and in
11442          * increasing order
11443          */
11444         stat_mask =
11445                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11446         changing_mask = 0;
11447         ld_mask = 0;
11448         change_count = 0;
11449         any_shared_limit_changing = 0;
11450         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11451                 if (!valid_vl(i))
11452                         continue;
11453                 this_shared_changing = new_bc->vl[i].shared
11454                                                 != cur_bc.vl[i].shared;
11455                 if (this_shared_changing)
11456                         any_shared_limit_changing = 1;
11457                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11458                     this_shared_changing) {
11459                         changing[i] = 1;
11460                         changing_mask |= stat_mask;
11461                         change_count++;
11462                 }
11463                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11464                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
11465                         lowering_dedicated[i] = 1;
11466                         ld_mask |= stat_mask;
11467                 }
11468         }
11469
11470         /* bracket the credit change with a total adjustment */
11471         if (new_total > cur_total)
11472                 set_global_limit(dd, new_total);
11473
11474         /*
11475          * Start the credit change algorithm.
11476          */
11477         use_all_mask = 0;
11478         if ((be16_to_cpu(new_bc->overall_shared_limit) <
11479              be16_to_cpu(cur_bc.overall_shared_limit)) ||
11480             (is_ax(dd) && any_shared_limit_changing)) {
11481                 set_global_shared(dd, 0);
11482                 cur_bc.overall_shared_limit = 0;
11483                 use_all_mask = 1;
11484         }
11485
11486         for (i = 0; i < NUM_USABLE_VLS; i++) {
11487                 if (!valid_vl(i))
11488                         continue;
11489
11490                 if (changing[i]) {
11491                         set_vl_shared(dd, i, 0);
11492                         cur_bc.vl[i].shared = 0;
11493                 }
11494         }
11495
11496         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11497                                  "shared");
11498
11499         if (change_count > 0) {
11500                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11501                         if (!valid_vl(i))
11502                                 continue;
11503
11504                         if (lowering_dedicated[i]) {
11505                                 set_vl_dedicated(dd, i,
11506                                                  be16_to_cpu(new_bc->
11507                                                              vl[i].dedicated));
11508                                 cur_bc.vl[i].dedicated =
11509                                                 new_bc->vl[i].dedicated;
11510                         }
11511                 }
11512
11513                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11514
11515                 /* now raise all dedicated that are going up */
11516                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11517                         if (!valid_vl(i))
11518                                 continue;
11519
11520                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
11521                                         be16_to_cpu(cur_bc.vl[i].dedicated))
11522                                 set_vl_dedicated(dd, i,
11523                                                  be16_to_cpu(new_bc->
11524                                                              vl[i].dedicated));
11525                 }
11526         }
11527
11528         /* next raise all shared that are going up */
11529         for (i = 0; i < NUM_USABLE_VLS; i++) {
11530                 if (!valid_vl(i))
11531                         continue;
11532
11533                 if (be16_to_cpu(new_bc->vl[i].shared) >
11534                                 be16_to_cpu(cur_bc.vl[i].shared))
11535                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11536         }
11537
11538         /* finally raise the global shared */
11539         if (be16_to_cpu(new_bc->overall_shared_limit) >
11540             be16_to_cpu(cur_bc.overall_shared_limit))
11541                 set_global_shared(dd,
11542                                   be16_to_cpu(new_bc->overall_shared_limit));
11543
11544         /* bracket the credit change with a total adjustment */
11545         if (new_total < cur_total)
11546                 set_global_limit(dd, new_total);
11547
11548         /*
11549          * Determine the actual number of operational VLS using the number of
11550          * dedicated and shared credits for each VL.
11551          */
11552         if (change_count > 0) {
11553                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11554                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11555                             be16_to_cpu(new_bc->vl[i].shared) > 0)
11556                                 vl_count++;
11557                 ppd->actual_vls_operational = vl_count;
11558                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11559                                     ppd->actual_vls_operational :
11560                                     ppd->vls_operational,
11561                                     NULL);
11562                 if (ret == 0)
11563                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11564                                            ppd->actual_vls_operational :
11565                                            ppd->vls_operational, NULL);
11566                 if (ret)
11567                         return ret;
11568         }
11569         return 0;
11570 }
11571
11572 /*
11573  * Read the given fabric manager table. Return the size of the
11574  * table (in bytes) on success, and a negative error code on
11575  * failure.
11576  */
11577 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11579 {
11580         int size;
11581         struct vl_arb_cache *vlc;
11582
11583         switch (which) {
11584         case FM_TBL_VL_HIGH_ARB:
11585                 size = 256;
11586                 /*
11587                  * OPA specifies 128 elements (of 2 bytes each), though
11588                  * HFI supports only 16 elements in h/w.
11589                  */
11590                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11591                 vl_arb_get_cache(vlc, t);
11592                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11593                 break;
11594         case FM_TBL_VL_LOW_ARB:
11595                 size = 256;
11596                 /*
11597                  * OPA specifies 128 elements (of 2 bytes each), though
11598                  * HFI supports only 16 elements in h/w.
11599                  */
11600                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11601                 vl_arb_get_cache(vlc, t);
11602                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11603                 break;
11604         case FM_TBL_BUFFER_CONTROL:
11605                 size = get_buffer_control(ppd->dd, t, NULL);
11606                 break;
11607         case FM_TBL_SC2VLNT:
11608                 size = get_sc2vlnt(ppd->dd, t);
11609                 break;
11610         case FM_TBL_VL_PREEMPT_ELEMS:
11611                 size = 256;
11612                 /* OPA specifies 128 elements, of 2 bytes each */
11613                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11614                 break;
11615         case FM_TBL_VL_PREEMPT_MATRIX:
11616                 size = 256;
11617                 /*
11618                  * OPA specifies that this is the same size as the VL
11619                  * arbitration tables (i.e., 256 bytes).
11620                  */
11621                 break;
11622         default:
11623                 return -EINVAL;
11624         }
11625         return size;
11626 }
11627
11628 /*
11629  * Write the given fabric manager table.
11630  */
11631 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11632 {
11633         int ret = 0;
11634         struct vl_arb_cache *vlc;
11635
11636         switch (which) {
11637         case FM_TBL_VL_HIGH_ARB:
11638                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11639                 if (vl_arb_match_cache(vlc, t)) {
11640                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11641                         break;
11642                 }
11643                 vl_arb_set_cache(vlc, t);
11644                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11645                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11646                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11647                 break;
11648         case FM_TBL_VL_LOW_ARB:
11649                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11650                 if (vl_arb_match_cache(vlc, t)) {
11651                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11652                         break;
11653                 }
11654                 vl_arb_set_cache(vlc, t);
11655                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11656                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11657                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11658                 break;
11659         case FM_TBL_BUFFER_CONTROL:
11660                 ret = set_buffer_control(ppd, t);
11661                 break;
11662         case FM_TBL_SC2VLNT:
11663                 set_sc2vlnt(ppd->dd, t);
11664                 break;
11665         default:
11666                 ret = -EINVAL;
11667         }
11668         return ret;
11669 }
11670
11671 /*
11672  * Disable all data VLs.
11673  *
11674  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11675  */
11676 static int disable_data_vls(struct hfi1_devdata *dd)
11677 {
11678         if (is_ax(dd))
11679                 return 1;
11680
11681         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11682
11683         return 0;
11684 }
11685
11686 /*
11687  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11688  * Just re-enables all data VLs (the "fill" part happens
11689  * automatically - the name was chosen for symmetry with
11690  * stop_drain_data_vls()).
11691  *
11692  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11693  */
11694 int open_fill_data_vls(struct hfi1_devdata *dd)
11695 {
11696         if (is_ax(dd))
11697                 return 1;
11698
11699         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11700
11701         return 0;
11702 }
11703
11704 /*
11705  * drain_data_vls() - assumes that disable_data_vls() has been called;
11706  * waits for the occupancy of the per-VL FIFOs of all contexts and the
11707  * SDMA engines to drop to 0.
11708  */
11709 static void drain_data_vls(struct hfi1_devdata *dd)
11710 {
11711         sc_wait(dd);
11712         sdma_wait(dd);
11713         pause_for_credit_return(dd);
11714 }
11715
11716 /*
11717  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11718  *
11719  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11720  * meant to be used like this:
11721  *
11722  * stop_drain_data_vls(dd);
11723  * // do things with per-VL resources
11724  * open_fill_data_vls(dd);
11725  */
11726 int stop_drain_data_vls(struct hfi1_devdata *dd)
11727 {
11728         int ret;
11729
11730         ret = disable_data_vls(dd);
11731         if (ret == 0)
11732                 drain_data_vls(dd);
11733
11734         return ret;
11735 }
11736
11737 /*
11738  * Convert a nanosecond time to a cclock count.  No matter how slow
11739  * the cclock, a non-zero ns will always have a non-zero result.
11740  */
11741 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11742 {
11743         u32 cclocks;
11744
11745         if (dd->icode == ICODE_FPGA_EMULATION)
11746                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11747         else  /* simulation pretends to be ASIC */
11748                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11749         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11750                 cclocks = 1;
11751         return cclocks;
11752 }
11753
11754 /*
11755  * Convert a cclock count to nanoseconds.  No matter how slow
11756  * the cclock, a non-zero cclocks count will always have a non-zero result.
11757  */
11758 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11759 {
11760         u32 ns;
11761
11762         if (dd->icode == ICODE_FPGA_EMULATION)
11763                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11764         else  /* simulation pretends to be ASIC */
11765                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11766         if (cclocks && !ns)
11767                 ns = 1;
11768         return ns;
11769 }
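
/*
 * Worked example for the two conversions above (illustrative only, not part
 * of the driver; the cclock periods used here are made-up round numbers):
 *
 *   With a 1000 ps cclock period, ns_to_cclock(dd, 5) = (5 * 1000) / 1000
 *   = 5 ticks, and cclock_to_ns(dd, 5) = (5 * 1000) / 1000 = 5 ns.
 *
 *   With a very slow 30000 ps period, ns_to_cclock(dd, 1) computes
 *   (1 * 1000) / 30000 = 0, and the trailing "if (ns && !cclocks)" check
 *   rounds the result up to 1 tick, so a non-zero request never truncates
 *   to zero.  cclock_to_ns() applies the same floor in the other direction.
 */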
11770
11771 /*
11772  * Dynamically adjust the receive interrupt timeout for a context based on
11773  * incoming packet rate.
11774  *
11775  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11776  */
11777 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11778 {
11779         struct hfi1_devdata *dd = rcd->dd;
11780         u32 timeout = rcd->rcvavail_timeout;
11781
11782         /*
11783          * This algorithm doubles or halves the timeout depending on whether
11784          * the number of packets received in this interrupt was less than,
11785          * or greater than or equal to, the interrupt count.
11786          *
11787          * The calculations below do not allow a steady state to be achieved.
11788          * Only at the endpoints is it possible to have an unchanging
11789          * timeout.
11790          */
11791         if (npkts < rcv_intr_count) {
11792                 /*
11793                  * Not enough packets arrived before the timeout, adjust
11794                  * timeout downward.
11795                  */
11796                 if (timeout < 2) /* already at minimum? */
11797                         return;
11798                 timeout >>= 1;
11799         } else {
11800                 /*
11801                  * More than enough packets arrived before the timeout, adjust
11802                  * timeout upward.
11803                  */
11804                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11805                         return;
11806                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11807         }
11808
11809         rcd->rcvavail_timeout = timeout;
11810         /*
11811          * timeout cannot be larger than rcv_intr_timeout_csr which has already
11812          * been verified to be in range
11813          */
11814         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11815                         (u64)timeout <<
11816                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11817 }
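
/*
 * Worked example for adjust_rcv_timeout() (illustrative only; the values
 * below are assumed, not read from hardware):
 *
 *   Suppose rcv_intr_count is 16 and rcd->rcvavail_timeout is 64.
 *   - An interrupt that handled npkts = 4 (< 16) halves the timeout to 32.
 *   - An interrupt that handled npkts = 40 (>= 16) doubles it to 128,
 *     clamped to dd->rcv_intr_timeout_csr.
 *   As the comment above notes, the value keeps moving between interrupts
 *   and only settles once it reaches the minimum (< 2) or the CSR maximum.
 */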
11818
11819 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11820                     u32 intr_adjust, u32 npkts)
11821 {
11822         struct hfi1_devdata *dd = rcd->dd;
11823         u64 reg;
11824         u32 ctxt = rcd->ctxt;
11825
11826         /*
11827          * Need to write timeout register before updating RcvHdrHead to ensure
11828          * that a new value is used when the HW decides to restart counting.
11829          */
11830         if (intr_adjust)
11831                 adjust_rcv_timeout(rcd, npkts);
11832         if (updegr) {
11833                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11834                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11835                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11836         }
11837         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11838                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11839                         << RCV_HDR_HEAD_HEAD_SHIFT);
11840         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11841 }
11842
11843 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11844 {
11845         u32 head, tail;
11846
11847         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11848                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11849
11850         if (hfi1_rcvhdrtail_kvaddr(rcd))
11851                 tail = get_rcvhdrtail(rcd);
11852         else
11853                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11854
11855         return head == tail;
11856 }
11857
11858 /*
11859  * Context Control and Receive Array encoding for buffer size:
11860  *      0x0 invalid
11861  *      0x1   4 KB
11862  *      0x2   8 KB
11863  *      0x3  16 KB
11864  *      0x4  32 KB
11865  *      0x5  64 KB
11866  *      0x6 128 KB
11867  *      0x7 256 KB
11868  *      0x8 512 KB (Receive Array only)
11869  *      0x9   1 MB (Receive Array only)
11870  *      0xa   2 MB (Receive Array only)
11871  *
11872  *      0xB-0xF - reserved (Receive Array only)
11873  *
11874  *
11875  * This routine assumes that the value has already been sanity checked.
11876  */
11877 static u32 encoded_size(u32 size)
11878 {
11879         switch (size) {
11880         case   4 * 1024: return 0x1;
11881         case   8 * 1024: return 0x2;
11882         case  16 * 1024: return 0x3;
11883         case  32 * 1024: return 0x4;
11884         case  64 * 1024: return 0x5;
11885         case 128 * 1024: return 0x6;
11886         case 256 * 1024: return 0x7;
11887         case 512 * 1024: return 0x8;
11888         case   1 * 1024 * 1024: return 0x9;
11889         case   2 * 1024 * 1024: return 0xa;
11890         }
11891         return 0x1;     /* if invalid, go with the minimum size */
11892 }
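
/*
 * Example uses of the encoding above (illustrative only):
 *   encoded_size(64 * 1024) returns 0x5 and encoded_size(2 * 1024 * 1024)
 *   returns 0xa, per the table; a size not in the table (e.g. 3 * 1024)
 *   falls through to the minimum encoding 0x1, since callers are expected
 *   to have sanity checked the value already.
 */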
11893
11894 /**
11895  * encode_rcv_header_entry_size - return chip specific encoding for size
11896  * @size: size in dwords
11897  *
11898  * Convert a receive header entry size to the encoding used in the CSR.
11899  *
11900  * Return a zero if the given size is invalid, otherwise the encoding.
11901  */
11902 u8 encode_rcv_header_entry_size(u8 size)
11903 {
11904         /* there are only 3 valid receive header entry sizes */
11905         if (size == 2)
11906                 return 1;
11907         if (size == 16)
11908                 return 2;
11909         if (size == 32)
11910                 return 4;
11911         return 0; /* invalid */
11912 }
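
/*
 * Example (illustrative only): a 32-dword receive header entry encodes to
 * 4, a 16-dword entry to 2, and a 2-dword entry to 1; any other size
 * (e.g. 8) returns 0 and is treated as invalid by the caller.
 */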
11913
11914 /**
11915  * hfi1_validate_rcvhdrcnt - validate hdrcnt
11916  * @dd: the device data
11917  * @thecnt: the header count
11918  */
11919 int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
11920 {
11921         if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
11922                 dd_dev_err(dd, "Receive header queue count too small\n");
11923                 return -EINVAL;
11924         }
11925
11926         if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
11927                 dd_dev_err(dd,
11928                            "Receive header queue count cannot be greater than %u\n",
11929                            HFI1_MAX_HDRQ_EGRBUF_CNT);
11930                 return -EINVAL;
11931         }
11932
11933         if (thecnt % HDRQ_INCREMENT) {
11934                 dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
11935                            thecnt, HDRQ_INCREMENT);
11936                 return -EINVAL;
11937         }
11938
11939         return 0;
11940 }
11941
11942 /**
11943  * set_hdrq_regs - set header queue registers for context
11944  * @dd: the device data
11945  * @ctxt: the context
11946  * @entsize: the dword entry size
11947  * @hdrcnt: the number of header entries
11948  */
11949 void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
11950 {
11951         u64 reg;
11952
11953         reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
11954               RCV_HDR_CNT_CNT_SHIFT;
11955         write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
11956         reg = ((u64)encode_rcv_header_entry_size(entsize) &
11957                RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
11958               RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
11959         write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
11960         reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
11961               RCV_HDR_SIZE_HDR_SIZE_SHIFT;
11962         write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);
11963
11964         /*
11965          * Program dummy tail address for every receive context
11966          * before enabling any receive context
11967          */
11968         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11969                         dd->rcvhdrtail_dummy_dma);
11970 }
11971
11972 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11973                   struct hfi1_ctxtdata *rcd)
11974 {
11975         u64 rcvctrl, reg;
11976         int did_enable = 0;
11977         u16 ctxt;
11978
11979         if (!rcd)
11980                 return;
11981
11982         ctxt = rcd->ctxt;
11983
11984         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11985
11986         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11987         /* if the context is already enabled, don't do the extra steps */
11988         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11989             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11990                 /* reset the tail and hdr addresses, and sequence count */
11991                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11992                                 rcd->rcvhdrq_dma);
11993                 if (hfi1_rcvhdrtail_kvaddr(rcd))
11994                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11995                                         rcd->rcvhdrqtailaddr_dma);
11996                 hfi1_set_seq_cnt(rcd, 1);
11997
11998                 /* reset the cached receive header queue head value */
11999                 hfi1_set_rcd_head(rcd, 0);
12000
12001                 /*
12002                  * Zero the receive header queue so we don't get false
12003                  * positives when checking the sequence number.  The
12004                  * sequence numbers could land exactly on the same spot.
12005                  * E.g. a rcd restart before the receive header wrapped.
12006                  */
12007                 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
12008
12009                 /* starting timeout */
12010                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
12011
12012                 /* enable the context */
12013                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
12014
12015                 /* clean the egr buffer size first */
12016                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12017                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
12018                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
12019                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
12020
12021                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
12022                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
12023                 did_enable = 1;
12024
12025                 /* zero RcvEgrIndexHead */
12026                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
12027
12028                 /* set eager count and base index */
12029                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
12030                         & RCV_EGR_CTRL_EGR_CNT_MASK)
12031                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
12032                         (((rcd->eager_base >> RCV_SHIFT)
12033                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
12034                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
12035                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
12036
12037                 /*
12038                  * Set TID (expected) count and base index.
12039                  * rcd->expected_count is set to individual RcvArray entries,
12040                  * not pairs, and the CSR takes a pair-count in groups of
12041                  * four, so divide by 8.
12042                  */
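                /*
                 * Worked example (illustrative, with an assumed count): an
                 * expected_count of 2048 individual RcvArray entries is
                 * 1024 pairs, i.e. 256 groups of four pairs; the shift by
                 * RCV_SHIFT below is that divide by 8, so 2048 / 8 = 256
                 * is the pair-count value programmed into the CSR.
                 */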
12043                 reg = (((rcd->expected_count >> RCV_SHIFT)
12044                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
12045                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
12046                       (((rcd->expected_base >> RCV_SHIFT)
12047                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
12048                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
12049                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
12050                 if (ctxt == HFI1_CTRL_CTXT)
12051                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
12052         }
12053         if (op & HFI1_RCVCTRL_CTXT_DIS) {
12054                 write_csr(dd, RCV_VL15, 0);
12055                 /*
12056                  * When the receive context is being disabled, turn on tail
12057                  * update with a dummy tail address and then disable the
12058                  * receive context.
12059                  */
12060                 if (dd->rcvhdrtail_dummy_dma) {
12061                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12062                                         dd->rcvhdrtail_dummy_dma);
12063                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
12064                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12065                 }
12066
12067                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
12068         }
12069         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
12070                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
12071                               IS_RCVAVAIL_START + rcd->ctxt, true);
12072                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
12073         }
12074         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
12075                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
12076                               IS_RCVAVAIL_START + rcd->ctxt, false);
12077                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
12078         }
12079         if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
12080                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12081         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
12082                 /* See comment on RcvCtxtCtrl.TailUpd above */
12083                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
12084                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12085         }
12086         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
12087                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
12088         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
12089                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
12090         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
12091                 /*
12092                  * In one-packet-per-eager mode, the size comes from
12093                  * the RcvArray entry.
12094                  */
12095                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12096                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12097         }
12098         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
12099                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12100         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
12101                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12102         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
12103                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12104         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
12105                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12106         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12107                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12108         if (op & HFI1_RCVCTRL_URGENT_ENB)
12109                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12110                               IS_RCVURGENT_START + rcd->ctxt, true);
12111         if (op & HFI1_RCVCTRL_URGENT_DIS)
12112                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12113                               IS_RCVURGENT_START + rcd->ctxt, false);
12114
12115         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12116         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12117
12118         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12119         if (did_enable &&
12120             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12121                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12122                 if (reg != 0) {
12123                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12124                                     ctxt, reg);
12125                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12126                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12127                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12128                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12129                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12130                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12131                                     ctxt, reg, reg == 0 ? "not" : "still");
12132                 }
12133         }
12134
12135         if (did_enable) {
12136                 /*
12137                  * The interrupt timeout and count must be set after
12138                  * the context is enabled to take effect.
12139                  */
12140                 /* set interrupt timeout */
12141                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12142                                 (u64)rcd->rcvavail_timeout <<
12143                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12144
12145                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12146                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12147                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12148         }
12149
12150         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12151                 /*
12152                  * If the context has been disabled and the Tail Update has
12153                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
12154                  * so it doesn't contain an address that is invalid.
12155                  */
12156                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12157                                 dd->rcvhdrtail_dummy_dma);
12158 }
12159
12160 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12161 {
12162         int ret;
12163         u64 val = 0;
12164
12165         if (namep) {
12166                 ret = dd->cntrnameslen;
12167                 *namep = dd->cntrnames;
12168         } else {
12169                 const struct cntr_entry *entry;
12170                 int i, j;
12171
12172                 ret = (dd->ndevcntrs) * sizeof(u64);
12173
12174                 /* Get the start of the block of counters */
12175                 *cntrp = dd->cntrs;
12176
12177                 /*
12178                  * Now go and fill in each counter in the block.
12179                  */
12180                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12181                         entry = &dev_cntrs[i];
12182                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12183                         if (entry->flags & CNTR_DISABLED) {
12184                                 /* Nothing */
12185                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12186                         } else {
12187                                 if (entry->flags & CNTR_VL) {
12188                                         hfi1_cdbg(CNTR, "\tPer VL\n");
12189                                         for (j = 0; j < C_VL_COUNT; j++) {
12190                                                 val = entry->rw_cntr(entry,
12191                                                                   dd, j,
12192                                                                   CNTR_MODE_R,
12193                                                                   0);
12194                                                 hfi1_cdbg(
12195                                                    CNTR,
12196                                                    "\t\tRead 0x%llx for %d\n",
12197                                                    val, j);
12198                                                 dd->cntrs[entry->offset + j] =
12199                                                                             val;
12200                                         }
12201                                 } else if (entry->flags & CNTR_SDMA) {
12202                                         hfi1_cdbg(CNTR,
12203                                                   "\t Per SDMA Engine\n");
12204                                         for (j = 0; j < chip_sdma_engines(dd);
12205                                              j++) {
12206                                                 val =
12207                                                 entry->rw_cntr(entry, dd, j,
12208                                                                CNTR_MODE_R, 0);
12209                                                 hfi1_cdbg(CNTR,
12210                                                           "\t\tRead 0x%llx for %d\n",
12211                                                           val, j);
12212                                                 dd->cntrs[entry->offset + j] =
12213                                                                         val;
12214                                         }
12215                                 } else {
12216                                         val = entry->rw_cntr(entry, dd,
12217                                                         CNTR_INVALID_VL,
12218                                                         CNTR_MODE_R, 0);
12219                                         dd->cntrs[entry->offset] = val;
12220                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12221                                 }
12222                         }
12223                 }
12224         }
12225         return ret;
12226 }
12227
12228 /*
12229  * Used by sysfs to create files for hfi stats to read
12230  */
12231 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12232 {
12233         int ret;
12234         u64 val = 0;
12235
12236         if (namep) {
12237                 ret = ppd->dd->portcntrnameslen;
12238                 *namep = ppd->dd->portcntrnames;
12239         } else {
12240                 const struct cntr_entry *entry;
12241                 int i, j;
12242
12243                 ret = ppd->dd->nportcntrs * sizeof(u64);
12244                 *cntrp = ppd->cntrs;
12245
12246                 for (i = 0; i < PORT_CNTR_LAST; i++) {
12247                         entry = &port_cntrs[i];
12248                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12249                         if (entry->flags & CNTR_DISABLED) {
12250                                 /* Nothing */
12251                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12252                                 continue;
12253                         }
12254
12255                         if (entry->flags & CNTR_VL) {
12256                                 hfi1_cdbg(CNTR, "\tPer VL");
12257                                 for (j = 0; j < C_VL_COUNT; j++) {
12258                                         val = entry->rw_cntr(entry, ppd, j,
12259                                                                CNTR_MODE_R,
12260                                                                0);
12261                                         hfi1_cdbg(
12262                                            CNTR,
12263                                            "\t\tRead 0x%llx for %d",
12264                                            val, j);
12265                                         ppd->cntrs[entry->offset + j] = val;
12266                                 }
12267                         } else {
12268                                 val = entry->rw_cntr(entry, ppd,
12269                                                        CNTR_INVALID_VL,
12270                                                        CNTR_MODE_R,
12271                                                        0);
12272                                 ppd->cntrs[entry->offset] = val;
12273                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12274                         }
12275                 }
12276         }
12277         return ret;
12278 }
12279
12280 static void free_cntrs(struct hfi1_devdata *dd)
12281 {
12282         struct hfi1_pportdata *ppd;
12283         int i;
12284
12285         if (dd->synth_stats_timer.function)
12286                 del_timer_sync(&dd->synth_stats_timer);
12287         ppd = (struct hfi1_pportdata *)(dd + 1);
12288         for (i = 0; i < dd->num_pports; i++, ppd++) {
12289                 kfree(ppd->cntrs);
12290                 kfree(ppd->scntrs);
12291                 free_percpu(ppd->ibport_data.rvp.rc_acks);
12292                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12293                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12294                 ppd->cntrs = NULL;
12295                 ppd->scntrs = NULL;
12296                 ppd->ibport_data.rvp.rc_acks = NULL;
12297                 ppd->ibport_data.rvp.rc_qacks = NULL;
12298                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12299         }
12300         kfree(dd->portcntrnames);
12301         dd->portcntrnames = NULL;
12302         kfree(dd->cntrs);
12303         dd->cntrs = NULL;
12304         kfree(dd->scntrs);
12305         dd->scntrs = NULL;
12306         kfree(dd->cntrnames);
12307         dd->cntrnames = NULL;
12308         if (dd->update_cntr_wq) {
12309                 destroy_workqueue(dd->update_cntr_wq);
12310                 dd->update_cntr_wq = NULL;
12311         }
12312 }
12313
12314 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12315                               u64 *psval, void *context, int vl)
12316 {
12317         u64 val;
12318         u64 sval = *psval;
12319
12320         if (entry->flags & CNTR_DISABLED) {
12321                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12322                 return 0;
12323         }
12324
12325         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12326
12327         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12328
12329         /* If it's a synthetic counter there is more work we need to do */
12330         if (entry->flags & CNTR_SYNTH) {
12331                 if (sval == CNTR_MAX) {
12332                         /* No need to read already saturated */
12333                         return CNTR_MAX;
12334                 }
12335
12336                 if (entry->flags & CNTR_32BIT) {
12337                         /* 32bit counters can wrap multiple times */
12338                         u64 upper = sval >> 32;
12339                         u64 lower = (sval << 32) >> 32;
12340
12341                         if (lower > val) { /* hw wrapped */
12342                                 if (upper == CNTR_32BIT_MAX)
12343                                         val = CNTR_MAX;
12344                                 else
12345                                         upper++;
12346                         }
12347
12348                         if (val != CNTR_MAX)
12349                                 val = (upper << 32) | val;
12350
12351                 } else {
12352                         /* If we rolled we are saturated */
12353                         if ((val < sval) || (val > CNTR_MAX))
12354                                 val = CNTR_MAX;
12355                 }
12356         }
12357
12358         *psval = val;
12359
12360         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12361
12362         return val;
12363 }
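
/*
 * Worked example of the 32-bit wrap handling in read_dev_port_cntr()
 * (illustrative values only):
 *
 *   Suppose the saved synthetic value is sval = 0x200000010, so
 *   upper = 0x2 and lower = 0x10.  If the hardware now reads val = 0x5,
 *   then lower > val, meaning the 32-bit register wrapped since the last
 *   read; upper is bumped to 0x3 and the reconstructed 64-bit value is
 *   (0x3 << 32) | 0x5 = 0x300000005.  Had upper already been
 *   CNTR_32BIT_MAX, the counter would instead saturate at CNTR_MAX.
 */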
12364
12365 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12366                                struct cntr_entry *entry,
12367                                u64 *psval, void *context, int vl, u64 data)
12368 {
12369         u64 val;
12370
12371         if (entry->flags & CNTR_DISABLED) {
12372                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12373                 return 0;
12374         }
12375
12376         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12377
12378         if (entry->flags & CNTR_SYNTH) {
12379                 *psval = data;
12380                 if (entry->flags & CNTR_32BIT) {
12381                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12382                                              (data << 32) >> 32);
12383                         val = data; /* return the full 64bit value */
12384                 } else {
12385                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12386                                              data);
12387                 }
12388         } else {
12389                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12390         }
12391
12392         *psval = val;
12393
12394         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12395
12396         return val;
12397 }
12398
12399 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12400 {
12401         struct cntr_entry *entry;
12402         u64 *sval;
12403
12404         entry = &dev_cntrs[index];
12405         sval = dd->scntrs + entry->offset;
12406
12407         if (vl != CNTR_INVALID_VL)
12408                 sval += vl;
12409
12410         return read_dev_port_cntr(dd, entry, sval, dd, vl);
12411 }
12412
12413 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12414 {
12415         struct cntr_entry *entry;
12416         u64 *sval;
12417
12418         entry = &dev_cntrs[index];
12419         sval = dd->scntrs + entry->offset;
12420
12421         if (vl != CNTR_INVALID_VL)
12422                 sval += vl;
12423
12424         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12425 }
12426
12427 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12428 {
12429         struct cntr_entry *entry;
12430         u64 *sval;
12431
12432         entry = &port_cntrs[index];
12433         sval = ppd->scntrs + entry->offset;
12434
12435         if (vl != CNTR_INVALID_VL)
12436                 sval += vl;
12437
12438         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12439             (index <= C_RCV_HDR_OVF_LAST)) {
12440                 /* We do not want to bother for disabled contexts */
12441                 return 0;
12442         }
12443
12444         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12445 }
12446
12447 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12448 {
12449         struct cntr_entry *entry;
12450         u64 *sval;
12451
12452         entry = &port_cntrs[index];
12453         sval = ppd->scntrs + entry->offset;
12454
12455         if (vl != CNTR_INVALID_VL)
12456                 sval += vl;
12457
12458         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12459             (index <= C_RCV_HDR_OVF_LAST)) {
12460                 /* We do not want to bother for disabled contexts */
12461                 return 0;
12462         }
12463
12464         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12465 }
12466
12467 static void do_update_synth_timer(struct work_struct *work)
12468 {
12469         u64 cur_tx;
12470         u64 cur_rx;
12471         u64 total_flits;
12472         u8 update = 0;
12473         int i, j, vl;
12474         struct hfi1_pportdata *ppd;
12475         struct cntr_entry *entry;
12476         struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12477                                                update_cntr_work);
12478
12479         /*
12480          * Rather than keep beating on the CSRs, pick a minimal set that we can
12481          * check to watch for potential roll over. We can do this by looking at
12482          * the number of flits sent/received. If the total flits exceeds 32 bits
12483          * then we have to iterate all the counters and update.
12484          */
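        /*
         * Illustrative example (assumed numbers): if last_tx and last_rx
         * were both 0x100000000 and the current reads are both
         * 0x140000000, total_flits = 0x80000000, which (assuming
         * CNTR_32BIT_MAX is the full 32-bit maximum) stays below the
         * limit, so no full update is forced.  If either current read
         * were smaller than its saved value, the roll-over check below
         * forces an update of every counter instead.
         */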
12485         entry = &dev_cntrs[C_DC_RCV_FLITS];
12486         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12487
12488         entry = &dev_cntrs[C_DC_XMIT_FLITS];
12489         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12490
12491         hfi1_cdbg(
12492             CNTR,
12493             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12494             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12495
12496         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12497                 /*
12498                  * May not be strictly necessary to update but it won't hurt and
12499                  * simplifies the logic here.
12500                  */
12501                 update = 1;
12502                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12503                           dd->unit);
12504         } else {
12505                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12506                 hfi1_cdbg(CNTR,
12507                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12508                           total_flits, (u64)CNTR_32BIT_MAX);
12509                 if (total_flits >= CNTR_32BIT_MAX) {
12510                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12511                                   dd->unit);
12512                         update = 1;
12513                 }
12514         }
12515
12516         if (update) {
12517                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12518                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12519                         entry = &dev_cntrs[i];
12520                         if (entry->flags & CNTR_VL) {
12521                                 for (vl = 0; vl < C_VL_COUNT; vl++)
12522                                         read_dev_cntr(dd, i, vl);
12523                         } else {
12524                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12525                         }
12526                 }
12527                 ppd = (struct hfi1_pportdata *)(dd + 1);
12528                 for (i = 0; i < dd->num_pports; i++, ppd++) {
12529                         for (j = 0; j < PORT_CNTR_LAST; j++) {
12530                                 entry = &port_cntrs[j];
12531                                 if (entry->flags & CNTR_VL) {
12532                                         for (vl = 0; vl < C_VL_COUNT; vl++)
12533                                                 read_port_cntr(ppd, j, vl);
12534                                 } else {
12535                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
12536                                 }
12537                         }
12538                 }
12539
12540                 /*
12541                  * We want the value in the register. The goal is to keep track
12542                  * of the number of "ticks" not the counter value. In other
12543                  * words if the register rolls we want to notice it and go ahead
12544                  * and force an update.
12545                  */
12546                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12547                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12548                                                 CNTR_MODE_R, 0);
12549
12550                 entry = &dev_cntrs[C_DC_RCV_FLITS];
12551                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12552                                                 CNTR_MODE_R, 0);
12553
12554                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12555                           dd->unit, dd->last_tx, dd->last_rx);
12556
12557         } else {
12558                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12559         }
12560 }
12561
12562 static void update_synth_timer(struct timer_list *t)
12563 {
12564         struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12565
12566         queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12567         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12568 }
12569
12570 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12571 static int init_cntrs(struct hfi1_devdata *dd)
12572 {
12573         int i, rcv_ctxts, j;
12574         size_t sz;
12575         char *p;
12576         char name[C_MAX_NAME];
12577         struct hfi1_pportdata *ppd;
12578         const char *bit_type_32 = ",32";
12579         const int bit_type_32_sz = strlen(bit_type_32);
12580         u32 sdma_engines = chip_sdma_engines(dd);
12581
12582         /* set up the stats timer; it is started (mod_timer) at the end */
12583         timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12584
12585         /***********************/
12586         /* per device counters */
12587         /***********************/
12588
12589         /* size names and determine how many we have */
12590         dd->ndevcntrs = 0;
12591         sz = 0;
12592
12593         for (i = 0; i < DEV_CNTR_LAST; i++) {
12594                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12595                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12596                         continue;
12597                 }
12598
12599                 if (dev_cntrs[i].flags & CNTR_VL) {
12600                         dev_cntrs[i].offset = dd->ndevcntrs;
12601                         for (j = 0; j < C_VL_COUNT; j++) {
12602                                 snprintf(name, C_MAX_NAME, "%s%d",
12603                                          dev_cntrs[i].name, vl_from_idx(j));
12604                                 sz += strlen(name);
12605                                 /* Add ",32" for 32-bit counters */
12606                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12607                                         sz += bit_type_32_sz;
12608                                 sz++;
12609                                 dd->ndevcntrs++;
12610                         }
12611                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12612                         dev_cntrs[i].offset = dd->ndevcntrs;
12613                         for (j = 0; j < sdma_engines; j++) {
12614                                 snprintf(name, C_MAX_NAME, "%s%d",
12615                                          dev_cntrs[i].name, j);
12616                                 sz += strlen(name);
12617                                 /* Add ",32" for 32-bit counters */
12618                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12619                                         sz += bit_type_32_sz;
12620                                 sz++;
12621                                 dd->ndevcntrs++;
12622                         }
12623                 } else {
12624                         /* +1 for newline. */
12625                         sz += strlen(dev_cntrs[i].name) + 1;
12626                         /* Add ",32" for 32-bit counters */
12627                         if (dev_cntrs[i].flags & CNTR_32BIT)
12628                                 sz += bit_type_32_sz;
12629                         dev_cntrs[i].offset = dd->ndevcntrs;
12630                         dd->ndevcntrs++;
12631                 }
12632         }
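
        /*
         * Name-buffer sizing example for the loop above (illustrative; the
         * counter name is hypothetical): a 32-bit per-VL counter named
         * "RxFoo" expands to one entry per VL, and each expanded name such
         * as "RxFoo0" contributes strlen("RxFoo0") + strlen(",32") + 1 for
         * the newline = 6 + 3 + 1 = 10 bytes to sz, while dd->ndevcntrs
         * grows by one per VL.
         */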
12633
12634         /* allocate space for the counter values */
12635         dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12636                             GFP_KERNEL);
12637         if (!dd->cntrs)
12638                 goto bail;
12639
12640         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12641         if (!dd->scntrs)
12642                 goto bail;
12643
12644         /* allocate space for the counter names */
12645         dd->cntrnameslen = sz;
12646         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12647         if (!dd->cntrnames)
12648                 goto bail;
12649
12650         /* fill in the names */
12651         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12652                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12653                         /* Nothing */
12654                 } else if (dev_cntrs[i].flags & CNTR_VL) {
12655                         for (j = 0; j < C_VL_COUNT; j++) {
12656                                 snprintf(name, C_MAX_NAME, "%s%d",
12657                                          dev_cntrs[i].name,
12658                                          vl_from_idx(j));
12659                                 memcpy(p, name, strlen(name));
12660                                 p += strlen(name);
12661
12662                                 /* Counter is 32 bits */
12663                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12664                                         memcpy(p, bit_type_32, bit_type_32_sz);
12665                                         p += bit_type_32_sz;
12666                                 }
12667
12668                                 *p++ = '\n';
12669                         }
12670                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12671                         for (j = 0; j < sdma_engines; j++) {
12672                                 snprintf(name, C_MAX_NAME, "%s%d",
12673                                          dev_cntrs[i].name, j);
12674                                 memcpy(p, name, strlen(name));
12675                                 p += strlen(name);
12676
12677                                 /* Counter is 32 bits */
12678                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12679                                         memcpy(p, bit_type_32, bit_type_32_sz);
12680                                         p += bit_type_32_sz;
12681                                 }
12682
12683                                 *p++ = '\n';
12684                         }
12685                 } else {
12686                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12687                         p += strlen(dev_cntrs[i].name);
12688
12689                         /* Counter is 32 bits */
12690                         if (dev_cntrs[i].flags & CNTR_32BIT) {
12691                                 memcpy(p, bit_type_32, bit_type_32_sz);
12692                                 p += bit_type_32_sz;
12693                         }
12694
12695                         *p++ = '\n';
12696                 }
12697         }
12698
12699         /*********************/
12700         /* per port counters */
12701         /*********************/
12702
12703         /*
12704          * Go through the counters for the overflows and disable the ones we
12705          * don't need. This varies based on platform so we need to do it
12706          * dynamically here.
12707          */
12708         rcv_ctxts = dd->num_rcv_contexts;
12709         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12710              i <= C_RCV_HDR_OVF_LAST; i++) {
12711                 port_cntrs[i].flags |= CNTR_DISABLED;
12712         }
12713
12714         /* size port counter names and determine how many we have */
12715         sz = 0;
12716         dd->nportcntrs = 0;
12717         for (i = 0; i < PORT_CNTR_LAST; i++) {
12718                 if (port_cntrs[i].flags & CNTR_DISABLED) {
12719                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12720                         continue;
12721                 }
12722
12723                 if (port_cntrs[i].flags & CNTR_VL) {
12724                         port_cntrs[i].offset = dd->nportcntrs;
12725                         for (j = 0; j < C_VL_COUNT; j++) {
12726                                 snprintf(name, C_MAX_NAME, "%s%d",
12727                                          port_cntrs[i].name, vl_from_idx(j));
12728                                 sz += strlen(name);
12729                                 /* Add ",32" for 32-bit counters */
12730                                 if (port_cntrs[i].flags & CNTR_32BIT)
12731                                         sz += bit_type_32_sz;
12732                                 sz++;
12733                                 dd->nportcntrs++;
12734                         }
12735                 } else {
12736                         /* +1 for newline */
12737                         sz += strlen(port_cntrs[i].name) + 1;
12738                         /* Add ",32" for 32-bit counters */
12739                         if (port_cntrs[i].flags & CNTR_32BIT)
12740                                 sz += bit_type_32_sz;
12741                         port_cntrs[i].offset = dd->nportcntrs;
12742                         dd->nportcntrs++;
12743                 }
12744         }
12745
12746         /* allocate space for the counter names */
12747         dd->portcntrnameslen = sz;
12748         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12749         if (!dd->portcntrnames)
12750                 goto bail;
12751
12752         /* fill in port cntr names */
12753         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12754                 if (port_cntrs[i].flags & CNTR_DISABLED)
12755                         continue;
12756
12757                 if (port_cntrs[i].flags & CNTR_VL) {
12758                         for (j = 0; j < C_VL_COUNT; j++) {
12759                                 snprintf(name, C_MAX_NAME, "%s%d",
12760                                          port_cntrs[i].name, vl_from_idx(j));
12761                                 memcpy(p, name, strlen(name));
12762                                 p += strlen(name);
12763
12764                                 /* Counter is 32 bits */
12765                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12766                                         memcpy(p, bit_type_32, bit_type_32_sz);
12767                                         p += bit_type_32_sz;
12768                                 }
12769
12770                                 *p++ = '\n';
12771                         }
12772                 } else {
12773                         memcpy(p, port_cntrs[i].name,
12774                                strlen(port_cntrs[i].name));
12775                         p += strlen(port_cntrs[i].name);
12776
12777                         /* Counter is 32 bits */
12778                         if (port_cntrs[i].flags & CNTR_32BIT) {
12779                                 memcpy(p, bit_type_32, bit_type_32_sz);
12780                                 p += bit_type_32_sz;
12781                         }
12782
12783                         *p++ = '\n';
12784                 }
12785         }
12786
12787         /* allocate per port storage for counter values */
12788         ppd = (struct hfi1_pportdata *)(dd + 1);
12789         for (i = 0; i < dd->num_pports; i++, ppd++) {
12790                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12791                 if (!ppd->cntrs)
12792                         goto bail;
12793
12794                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12795                 if (!ppd->scntrs)
12796                         goto bail;
12797         }
12798
12799         /* CPU counters need to be allocated and zeroed */
12800         if (init_cpu_counters(dd))
12801                 goto bail;
12802
12803         dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12804                                                      WQ_MEM_RECLAIM, dd->unit);
12805         if (!dd->update_cntr_wq)
12806                 goto bail;
12807
12808         INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12809
12810         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12811         return 0;
12812 bail:
12813         free_cntrs(dd);
12814         return -ENOMEM;
12815 }
12816
12817 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12818 {
12819         switch (chip_lstate) {
12820         default:
12821                 dd_dev_err(dd,
12822                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12823                            chip_lstate);
12824                 /* fall through */
12825         case LSTATE_DOWN:
12826                 return IB_PORT_DOWN;
12827         case LSTATE_INIT:
12828                 return IB_PORT_INIT;
12829         case LSTATE_ARMED:
12830                 return IB_PORT_ARMED;
12831         case LSTATE_ACTIVE:
12832                 return IB_PORT_ACTIVE;
12833         }
12834 }
12835
12836 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12837 {
12838         /* look at the HFI meta-states only */
12839         switch (chip_pstate & 0xf0) {
12840         default:
12841                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12842                            chip_pstate);
12843                 /* fall through */
12844         case PLS_DISABLED:
12845                 return IB_PORTPHYSSTATE_DISABLED;
12846         case PLS_OFFLINE:
12847                 return OPA_PORTPHYSSTATE_OFFLINE;
12848         case PLS_POLLING:
12849                 return IB_PORTPHYSSTATE_POLLING;
12850         case PLS_CONFIGPHY:
12851                 return IB_PORTPHYSSTATE_TRAINING;
12852         case PLS_LINKUP:
12853                 return IB_PORTPHYSSTATE_LINKUP;
12854         case PLS_PHYTEST:
12855                 return IB_PORTPHYSSTATE_PHY_TEST;
12856         }
12857 }
12858
12859 /* return the OPA port logical state name */
12860 const char *opa_lstate_name(u32 lstate)
12861 {
12862         static const char * const port_logical_names[] = {
12863                 "PORT_NOP",
12864                 "PORT_DOWN",
12865                 "PORT_INIT",
12866                 "PORT_ARMED",
12867                 "PORT_ACTIVE",
12868                 "PORT_ACTIVE_DEFER",
12869         };
12870         if (lstate < ARRAY_SIZE(port_logical_names))
12871                 return port_logical_names[lstate];
12872         return "unknown";
12873 }
12874
12875 /* return the OPA port physical state name */
12876 const char *opa_pstate_name(u32 pstate)
12877 {
12878         static const char * const port_physical_names[] = {
12879                 "PHYS_NOP",
12880                 "reserved1",
12881                 "PHYS_POLL",
12882                 "PHYS_DISABLED",
12883                 "PHYS_TRAINING",
12884                 "PHYS_LINKUP",
12885                 "PHYS_LINK_ERR_RECOVER",
12886                 "PHYS_PHY_TEST",
12887                 "reserved8",
12888                 "PHYS_OFFLINE",
12889                 "PHYS_GANGED",
12890                 "PHYS_TEST",
12891         };
12892         if (pstate < ARRAY_SIZE(port_physical_names))
12893                 return port_physical_names[pstate];
12894         return "unknown";
12895 }
12896
12897 /**
12898  * update_statusp - Update userspace status flag
12899  * @ppd: Port data structure
12900  * @state: port state information
12901  *
12902  * Actual port status is determined by the host_link_state value
12903  * in the ppd.
12904  *
12905  * host_link_state MUST be updated before updating the user space
12906  * statusp.
12907  */
12908 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12909 {
12910         /*
12911          * Set port status flags in the page mapped into userspace
12912          * memory. Do it here to ensure a reliable state - this is
12913          * the only function called by all state handling code.
12914          * Always set the flags because the cache value might have
12915          * been changed explicitly outside of this
12916          * function.
12917          */
12918         if (ppd->statusp) {
12919                 switch (state) {
12920                 case IB_PORT_DOWN:
12921                 case IB_PORT_INIT:
12922                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12923                                            HFI1_STATUS_IB_READY);
12924                         break;
12925                 case IB_PORT_ARMED:
12926                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12927                         break;
12928                 case IB_PORT_ACTIVE:
12929                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12930                         break;
12931                 }
12932         }
12933         dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12934                     opa_lstate_name(state), state);
12935 }
12936
12937 /**
12938  * wait_logical_linkstate - wait for an IB link state change to occur
12939  * @ppd: port device
12940  * @state: the state to wait for
12941  * @msecs: the number of milliseconds to wait
12942  *
12943  * Wait up to msecs milliseconds for IB link state change to occur.
12944  * For now, take the easy polling route.
12945  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12946  */
12947 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12948                                   int msecs)
12949 {
12950         unsigned long timeout;
12951         u32 new_state;
12952
12953         timeout = jiffies + msecs_to_jiffies(msecs);
12954         while (1) {
12955                 new_state = chip_to_opa_lstate(ppd->dd,
12956                                                read_logical_state(ppd->dd));
12957                 if (new_state == state)
12958                         break;
12959                 if (time_after(jiffies, timeout)) {
12960                         dd_dev_err(ppd->dd,
12961                                    "timeout waiting for link state 0x%x\n",
12962                                    state);
12963                         return -ETIMEDOUT;
12964                 }
12965                 msleep(20);
12966         }
12967
12968         return 0;
12969 }
12970
12971 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12972 {
12973         u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12974
12975         dd_dev_info(ppd->dd,
12976                     "physical state changed to %s (0x%x), phy 0x%x\n",
12977                     opa_pstate_name(ib_pstate), ib_pstate, state);
12978 }
12979
12980 /*
12981  * Read the physical hardware link state and check if it matches the host
12982  * driver's anticipated state.
12983  */
12984 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12985 {
12986         u32 read_state = read_physical_state(ppd->dd);
12987
12988         if (read_state == state) {
12989                 log_state_transition(ppd, state);
12990         } else {
12991                 dd_dev_err(ppd->dd,
12992                            "anticipated phy link state 0x%x, read 0x%x\n",
12993                            state, read_state);
12994         }
12995 }
12996
12997 /*
12998  * wait_physical_linkstate - wait for a physical link state change to occur
12999  * @ppd: port device
13000  * @state: the state to wait for
13001  * @msecs: the number of milliseconds to wait
13002  *
13003  * Wait up to msecs milliseconds for physical link state change to occur.
13004  * Returns 0 if state reached, otherwise -ETIMEDOUT.
13005  */
13006 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
13007                                    int msecs)
13008 {
13009         u32 read_state;
13010         unsigned long timeout;
13011
13012         timeout = jiffies + msecs_to_jiffies(msecs);
13013         while (1) {
13014                 read_state = read_physical_state(ppd->dd);
13015                 if (read_state == state)
13016                         break;
13017                 if (time_after(jiffies, timeout)) {
13018                         dd_dev_err(ppd->dd,
13019                                    "timeout waiting for phy link state 0x%x\n",
13020                                    state);
13021                         return -ETIMEDOUT;
13022                 }
13023                 usleep_range(1950, 2050); /* sleep 2ms-ish */
13024         }
13025
13026         log_state_transition(ppd, state);
13027         return 0;
13028 }
13029
13030 /*
13031  * wait_phys_link_offline_substates - wait for any offline substate
13032  * @ppd: port device
13033  * @msecs: the number of milliseconds to wait
13034  *
13035  * Wait up to msecs milliseconds for any offline physical link
13036  * state change to occur.
13037  * Returns the read physical state on success, otherwise -ETIMEDOUT.
13038  */
13039 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
13040                                             int msecs)
13041 {
13042         u32 read_state;
13043         unsigned long timeout;
13044
13045         timeout = jiffies + msecs_to_jiffies(msecs);
13046         while (1) {
13047                 read_state = read_physical_state(ppd->dd);
13048                 if ((read_state & 0xF0) == PLS_OFFLINE)
13049                         break;
13050                 if (time_after(jiffies, timeout)) {
13051                         dd_dev_err(ppd->dd,
13052                                    "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
13053                                    read_state, msecs);
13054                         return -ETIMEDOUT;
13055                 }
13056                 usleep_range(1950, 2050); /* sleep 2ms-ish */
13057         }
13058
13059         log_state_transition(ppd, read_state);
13060         return read_state;
13061 }
13062
13063 /*
13064  * wait_phys_link_out_of_offline - wait for any out of offline state
13065  * @ppd: port device
13066  * @msecs: the number of milliseconds to wait
13067  *
13068  * Wait up to msecs milliseconds for any out of offline physical link
13069  * state change to occur.
13070  * Returns the read physical state on success, otherwise -ETIMEDOUT.
13071  */
13072 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
13073                                          int msecs)
13074 {
13075         u32 read_state;
13076         unsigned long timeout;
13077
13078         timeout = jiffies + msecs_to_jiffies(msecs);
13079         while (1) {
13080                 read_state = read_physical_state(ppd->dd);
13081                 if ((read_state & 0xF0) != PLS_OFFLINE)
13082                         break;
13083                 if (time_after(jiffies, timeout)) {
13084                         dd_dev_err(ppd->dd,
13085                                    "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
13086                                    read_state, msecs);
13087                         return -ETIMEDOUT;
13088                 }
13089                 usleep_range(1950, 2050); /* sleep 2ms-ish */
13090         }
13091
13092         log_state_transition(ppd, read_state);
13093         return read_state;
13094 }
13095
13096 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
13097 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13098
13099 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
13100 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13101
13102 void hfi1_init_ctxt(struct send_context *sc)
13103 {
13104         if (sc) {
13105                 struct hfi1_devdata *dd = sc->dd;
13106                 u64 reg;
13107                 u8 set = (sc->type == SC_USER ?
13108                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13109                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13110                 reg = read_kctxt_csr(dd, sc->hw_context,
13111                                      SEND_CTXT_CHECK_ENABLE);
13112                 if (set)
13113                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13114                 else
13115                         SET_STATIC_RATE_CONTROL_SMASK(reg);
13116                 write_kctxt_csr(dd, sc->hw_context,
13117                                 SEND_CTXT_CHECK_ENABLE, reg);
13118         }
13119 }
13120
13121 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13122 {
13123         int ret = 0;
13124         u64 reg;
13125
13126         if (dd->icode != ICODE_RTL_SILICON) {
13127                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13128                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13129                                     __func__);
13130                 return -EINVAL;
13131         }
13132         reg = read_csr(dd, ASIC_STS_THERM);
13133         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13134                       ASIC_STS_THERM_CURR_TEMP_MASK);
13135         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13136                         ASIC_STS_THERM_LO_TEMP_MASK);
13137         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13138                         ASIC_STS_THERM_HI_TEMP_MASK);
13139         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13140                           ASIC_STS_THERM_CRIT_TEMP_MASK);
13141         /* triggers is a 3-bit value - 1 bit per trigger. */
13142         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13143
13144         return ret;
13145 }
13146
13147 /* ========================================================================= */
13148
13149 /**
13150  * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13151  * @dd: valid devdata
13152  * @src: IRQ source to determine register index from
13153  * @bits: the bits to set or clear
13154  * @set: true == set the bits, false == clear the bits
13155  *
13156  */
13157 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13158                            bool set)
13159 {
13160         u64 reg;
13161         u16 idx = src / BITS_PER_REGISTER;
13162
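              /*
               * Each 64-bit CCE_INT_MASK CSR covers BITS_PER_REGISTER
               * interrupt sources; idx selects the CSR for this source.
               */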
13163         spin_lock(&dd->irq_src_lock);
13164         reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13165         if (set)
13166                 reg |= bits;
13167         else
13168                 reg &= ~bits;
13169         write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13170         spin_unlock(&dd->irq_src_lock);
13171 }
13172
13173 /**
13174  * set_intr_bits() - Enable/disable a range (one or more) of IRQ sources
13175  * @dd: valid devdata
13176  * @first: first IRQ source to set/clear
13177  * @last: last IRQ source (inclusive) to set/clear
13178  * @set: true == set the bits, false == clear the bits
13179  *
13180  * If first == last, set the exact source.
13181  */
13182 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13183 {
13184         u64 bits = 0;
13185         u64 bit;
13186         u16 src;
13187
13188         if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13189                 return -EINVAL;
13190
13191         if (last < first)
13192                 return -ERANGE;
13193
13194         for (src = first; src <= last; src++) {
13195                 bit = src % BITS_PER_REGISTER;
13196                 /* wrapped to next register? */
13197                 if (!bit && bits) {
13198                         read_mod_write(dd, src - 1, bits, set);
13199                         bits = 0;
13200                 }
13201                 bits |= BIT_ULL(bit);
13202         }
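              /* write out the bits accumulated since the last register boundary */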
13203         read_mod_write(dd, last, bits, set);
13204
13205         return 0;
13206 }
13207
13208 /*
13209  * Clear all interrupt sources on the chip.
13210  */
13211 void clear_all_interrupts(struct hfi1_devdata *dd)
13212 {
13213         int i;
13214
13215         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13216                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13217
13218         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13219         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13220         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13221         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13222         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13223         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13224         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13225         for (i = 0; i < chip_send_contexts(dd); i++)
13226                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13227         for (i = 0; i < chip_sdma_engines(dd); i++)
13228                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13229
13230         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13231         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13232         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13233 }
13234
13235 /*
13236  * Remap the interrupt source from the general handler to the given MSI-X
13237  * interrupt.
13238  */
13239 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13240 {
13241         u64 reg;
13242         int m, n;
13243
13244         /* clear from the handled mask of the general interrupt */
13245         m = isrc / 64;
13246         n = isrc % 64;
13247         if (likely(m < CCE_NUM_INT_CSRS)) {
13248                 dd->gi_mask[m] &= ~((u64)1 << n);
13249         } else {
13250                 dd_dev_err(dd, "remap interrupt err\n");
13251                 return;
13252         }
13253
13254         /* direct the chip source to the given MSI-X interrupt */
13255         m = isrc / 8;
13256         n = isrc % 8;
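              /* each CCE_INT_MAP CSR packs eight 8-bit MSI-X vector numbers */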
13257         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13258         reg &= ~((u64)0xff << (8 * n));
13259         reg |= ((u64)msix_intr & 0xff) << (8 * n);
13260         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13261 }
13262
13263 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13264 {
13265         /*
13266          * SDMA engine interrupt sources are grouped by type, rather than
13267          * by engine.  Per-engine interrupts are as follows:
13268          *      SDMA
13269          *      SDMAProgress
13270          *      SDMAIdle
13271          */
13272         remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13273         remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13274         remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13275 }
13276
13277 /*
13278  * Set the general handler to accept all interrupts, remap all
13279  * chip interrupts back to MSI-X 0.
13280  */
13281 void reset_interrupts(struct hfi1_devdata *dd)
13282 {
13283         int i;
13284
13285         /* all interrupts handled by the general handler */
13286         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13287                 dd->gi_mask[i] = ~(u64)0;
13288
13289         /* all chip interrupts map to MSI-X 0 */
13290         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13291                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13292 }
13293
13294 /**
13295  * set_up_interrupts() - Initialize the IRQ resources and state
13296  * @dd: valid devdata
13297  *
13298  */
13299 static int set_up_interrupts(struct hfi1_devdata *dd)
13300 {
13301         int ret;
13302
13303         /* mask all interrupts */
13304         set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13305
13306         /* clear all pending interrupts */
13307         clear_all_interrupts(dd);
13308
13309         /* reset general handler mask, chip MSI-X mappings */
13310         reset_interrupts(dd);
13311
13312         /* ask for MSI-X interrupts */
13313         ret = msix_initialize(dd);
13314         if (ret)
13315                 return ret;
13316
13317         ret = msix_request_irqs(dd);
13318         if (ret)
13319                 msix_clean_up_interrupts(dd);
13320
13321         return ret;
13322 }
13323
13324 /*
13325  * Set up context values in dd.  Sets:
13326  *
13327  *      num_rcv_contexts - number of contexts being used
13328  *      n_krcv_queues - number of kernel contexts
13329  *      first_dyn_alloc_ctxt - first dynamically allocated context
13330  *                             in array of contexts
13331  *      freectxts  - number of free user contexts
13332  *      num_send_contexts - number of PIO send contexts being used
13333  *      num_vnic_contexts - number of contexts reserved for VNIC
13334  */
13335 static int set_up_context_variables(struct hfi1_devdata *dd)
13336 {
13337         unsigned long num_kernel_contexts;
13338         u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13339         int total_contexts;
13340         int ret;
13341         unsigned ngroups;
13342         int rmt_count;
13343         int user_rmt_reduced;
13344         u32 n_usr_ctxts;
13345         u32 send_contexts = chip_send_contexts(dd);
13346         u32 rcv_contexts = chip_rcv_contexts(dd);
13347
13348         /*
13349          * Kernel receive contexts:
13350          * - Context 0 - control context (VL15/multicast/error)
13351          * - Context 1 - first kernel context
13352          * - Context 2 - second kernel context
13353          * ...
13354          */
13355         if (n_krcvqs)
13356                 /*
13357                  * n_krcvqs is the sum of module parameter kernel receive
13358                  * contexts, krcvqs[].  It does not include the control
13359                  * context, so add that.
13360                  */
13361                 num_kernel_contexts = n_krcvqs + 1;
13362         else
13363                 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13364         /*
13365          * Every kernel receive context needs an ACK send context.
13366          * One send context is allocated for each VL{0-7} and VL15.
13367          */
13368         if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13369                 dd_dev_err(dd,
13370                            "Reducing # kernel rcv contexts to: %d, from %lu\n",
13371                            send_contexts - num_vls - 1,
13372                            num_kernel_contexts);
13373                 num_kernel_contexts = send_contexts - num_vls - 1;
13374         }
13375
13376         /* Accommodate VNIC contexts if possible */
13377         if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13378                 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13379                 num_vnic_contexts = 0;
13380         }
13381         total_contexts = num_kernel_contexts + num_vnic_contexts;
13382
13383         /*
13384          * User contexts:
13385          *      - default to 1 user context per real (non-HT) CPU core if
13386          *        num_user_contexts is negative
13387          */
13388         if (num_user_contexts < 0)
13389                 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13390         else
13391                 n_usr_ctxts = num_user_contexts;
13392         /*
13393          * Adjust the counts given a global max.
13394          */
13395         if (total_contexts + n_usr_ctxts > rcv_contexts) {
13396                 dd_dev_err(dd,
13397                            "Reducing # user receive contexts to: %d, from %u\n",
13398                            rcv_contexts - total_contexts,
13399                            n_usr_ctxts);
13400                 /* recalculate */
13401                 n_usr_ctxts = rcv_contexts - total_contexts;
13402         }
13403
13404         /*
13405          * The RMT entries are currently allocated as shown below:
13406          * 1. QOS (0 to 128 entries);
13407          * 2. FECN (num_kernel_context - 1 + num_user_contexts +
13408          *    num_vnic_contexts);
13409          * 3. VNIC (num_vnic_contexts).
13410          * It should be noted that FECN oversubscribes num_vnic_contexts
13411          * entries of RMT because both VNIC and PSM could allocate any receive
13412          * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13413          * and PSM FECN must reserve an RMT entry for each possible PSM receive
13414          * context.
13415          */
13416         rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13417         if (HFI1_CAP_IS_KSET(TID_RDMA))
13418                 rmt_count += num_kernel_contexts - 1;
13419         if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13420                 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13421                 dd_dev_err(dd,
13422                            "RMT size is reducing the number of user receive contexts from %u to %d\n",
13423                            n_usr_ctxts,
13424                            user_rmt_reduced);
13425                 /* recalculate */
13426                 n_usr_ctxts = user_rmt_reduced;
13427         }
13428
13429         total_contexts += n_usr_ctxts;
13430
13431         /* the first N are kernel contexts, the rest are user/vnic contexts */
13432         dd->num_rcv_contexts = total_contexts;
13433         dd->n_krcv_queues = num_kernel_contexts;
13434         dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13435         dd->num_vnic_contexts = num_vnic_contexts;
13436         dd->num_user_contexts = n_usr_ctxts;
13437         dd->freectxts = n_usr_ctxts;
13438         dd_dev_info(dd,
13439                     "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13440                     rcv_contexts,
13441                     (int)dd->num_rcv_contexts,
13442                     (int)dd->n_krcv_queues,
13443                     dd->num_vnic_contexts,
13444                     dd->num_user_contexts);
13445
13446         /*
13447          * Receive array allocation:
13448          *   All RcvArray entries are divided into groups of 8. This
13449          *   is required by the hardware and will speed up writes to
13450          *   consecutive entries by using write-combining of the entire
13451          *   cacheline.
13452          *
13453          *   The number of groups is evenly divided among all contexts.
13454          *   Any leftover groups are given to the first N user
13455          *   contexts.
13456          */
13457         dd->rcv_entries.group_size = RCV_INCREMENT;
13458         ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13459         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13460         dd->rcv_entries.nctxt_extra = ngroups -
13461                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13462         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13463                     dd->rcv_entries.ngroups,
13464                     dd->rcv_entries.nctxt_extra);
13465         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13466             MAX_EAGER_ENTRIES * 2) {
13467                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13468                         dd->rcv_entries.group_size;
13469                 dd_dev_info(dd,
13470                             "RcvArray group count too high, change to %u\n",
13471                             dd->rcv_entries.ngroups);
13472                 dd->rcv_entries.nctxt_extra = 0;
13473         }
13474         /*
13475          * PIO send contexts
13476          */
13477         ret = init_sc_pools_and_sizes(dd);
13478         if (ret >= 0) { /* success */
13479                 dd->num_send_contexts = ret;
13480                 dd_dev_info(
13481                         dd,
13482                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13483                         send_contexts,
13484                         dd->num_send_contexts,
13485                         dd->sc_sizes[SC_KERNEL].count,
13486                         dd->sc_sizes[SC_ACK].count,
13487                         dd->sc_sizes[SC_USER].count,
13488                         dd->sc_sizes[SC_VL15].count);
13489                 ret = 0;        /* success */
13490         }
13491
13492         return ret;
13493 }
13494
13495 /*
13496  * Set the device/port partition key table. The MAD code
13497  * will ensure that, at least, the partial management
13498  * partition key is present in the table.
13499  */
13500 static void set_partition_keys(struct hfi1_pportdata *ppd)
13501 {
13502         struct hfi1_devdata *dd = ppd->dd;
13503         u64 reg = 0;
13504         int i;
13505
13506         dd_dev_info(dd, "Setting partition keys\n");
13507         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13508                 reg |= (ppd->pkeys[i] &
13509                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13510                         ((i % 4) *
13511                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13512                 /* Each register holds 4 PKey values. */
13513                 if ((i % 4) == 3) {
13514                         write_csr(dd, RCV_PARTITION_KEY +
13515                                   ((i - 3) * 2), reg);
13516                         reg = 0;
13517                 }
13518         }
13519
13520         /* Always enable HW pkeys check when pkeys table is set */
13521         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13522 }
13523
13524 /*
13525  * These CSRs and memories are uninitialized on reset and must be
13526  * written before reading to set the ECC/parity bits.
13527  *
13528  * NOTE: All user context CSRs that are not mmapped write-only
13529  * (e.g. the TID flows) must be initialized even if the driver never
13530  * reads them.
13531  */
13532 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13533 {
13534         int i, j;
13535
13536         /* CceIntMap */
13537         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13538                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13539
13540         /* SendCtxtCreditReturnAddr */
13541         for (i = 0; i < chip_send_contexts(dd); i++)
13542                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13543
13544         /* PIO Send buffers */
13545         /* SDMA Send buffers */
13546         /*
13547          * These are not normally read, and (presently) have no method
13548          * to be read, so are not pre-initialized
13549          */
13550
13551         /* RcvHdrAddr */
13552         /* RcvHdrTailAddr */
13553         /* RcvTidFlowTable */
13554         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13555                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13556                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13557                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13558                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13559         }
13560
13561         /* RcvArray */
13562         for (i = 0; i < chip_rcv_array_count(dd); i++)
13563                 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13564
13565         /* RcvQPMapTable */
13566         for (i = 0; i < 32; i++)
13567                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13568 }
13569
13570 /*
13571  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13572  */
13573 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13574                              u64 ctrl_bits)
13575 {
13576         unsigned long timeout;
13577         u64 reg;
13578
13579         /* is the condition present? */
13580         reg = read_csr(dd, CCE_STATUS);
13581         if ((reg & status_bits) == 0)
13582                 return;
13583
13584         /* clear the condition */
13585         write_csr(dd, CCE_CTRL, ctrl_bits);
13586
13587         /* wait for the condition to clear */
13588         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13589         while (1) {
13590                 reg = read_csr(dd, CCE_STATUS);
13591                 if ((reg & status_bits) == 0)
13592                         return;
13593                 if (time_after(jiffies, timeout)) {
13594                         dd_dev_err(dd,
13595                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13596                                    status_bits, reg & status_bits);
13597                         return;
13598                 }
13599                 udelay(1);
13600         }
13601 }
13602
13603 /* set CCE CSRs to chip reset defaults */
13604 static void reset_cce_csrs(struct hfi1_devdata *dd)
13605 {
13606         int i;
13607
13608         /* CCE_REVISION read-only */
13609         /* CCE_REVISION2 read-only */
13610         /* CCE_CTRL - bits clear automatically */
13611         /* CCE_STATUS read-only, use CceCtrl to clear */
13612         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13613         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13614         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13615         for (i = 0; i < CCE_NUM_SCRATCH; i++)
13616                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13617         /* CCE_ERR_STATUS read-only */
13618         write_csr(dd, CCE_ERR_MASK, 0);
13619         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13620         /* CCE_ERR_FORCE leave alone */
13621         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13622                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13623         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13624         /* CCE_PCIE_CTRL leave alone */
13625         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13626                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13627                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13628                           CCE_MSIX_TABLE_UPPER_RESETCSR);
13629         }
13630         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13631                 /* CCE_MSIX_PBA read-only */
13632                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13633                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13634         }
13635         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13636                 write_csr(dd, CCE_INT_MAP, 0);
13637         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13638                 /* CCE_INT_STATUS read-only */
13639                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13640                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13641                 /* CCE_INT_FORCE leave alone */
13642                 /* CCE_INT_BLOCKED read-only */
13643         }
13644         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13645                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13646 }
13647
13648 /* set MISC CSRs to chip reset defaults */
13649 static void reset_misc_csrs(struct hfi1_devdata *dd)
13650 {
13651         int i;
13652
13653         for (i = 0; i < 32; i++) {
13654                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13655                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13656                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13657         }
13658         /*
13659          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13660          * only be written in 128-byte chunks
13661          */
13662         /* init RSA engine to clear lingering errors */
13663         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13664         write_csr(dd, MISC_CFG_RSA_MU, 0);
13665         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13666         /* MISC_STS_8051_DIGEST read-only */
13667         /* MISC_STS_SBM_DIGEST read-only */
13668         /* MISC_STS_PCIE_DIGEST read-only */
13669         /* MISC_STS_FAB_DIGEST read-only */
13670         /* MISC_ERR_STATUS read-only */
13671         write_csr(dd, MISC_ERR_MASK, 0);
13672         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13673         /* MISC_ERR_FORCE leave alone */
13674 }
13675
13676 /* set TXE CSRs to chip reset defaults */
13677 static void reset_txe_csrs(struct hfi1_devdata *dd)
13678 {
13679         int i;
13680
13681         /*
13682          * TXE Kernel CSRs
13683          */
13684         write_csr(dd, SEND_CTRL, 0);
13685         __cm_reset(dd, 0);      /* reset CM internal state */
13686         /* SEND_CONTEXTS read-only */
13687         /* SEND_DMA_ENGINES read-only */
13688         /* SEND_PIO_MEM_SIZE read-only */
13689         /* SEND_DMA_MEM_SIZE read-only */
13690         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13691         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13692         /* SEND_PIO_ERR_STATUS read-only */
13693         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13694         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13695         /* SEND_PIO_ERR_FORCE leave alone */
13696         /* SEND_DMA_ERR_STATUS read-only */
13697         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13698         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13699         /* SEND_DMA_ERR_FORCE leave alone */
13700         /* SEND_EGRESS_ERR_STATUS read-only */
13701         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13702         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13703         /* SEND_EGRESS_ERR_FORCE leave alone */
13704         write_csr(dd, SEND_BTH_QP, 0);
13705         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13706         write_csr(dd, SEND_SC2VLT0, 0);
13707         write_csr(dd, SEND_SC2VLT1, 0);
13708         write_csr(dd, SEND_SC2VLT2, 0);
13709         write_csr(dd, SEND_SC2VLT3, 0);
13710         write_csr(dd, SEND_LEN_CHECK0, 0);
13711         write_csr(dd, SEND_LEN_CHECK1, 0);
13712         /* SEND_ERR_STATUS read-only */
13713         write_csr(dd, SEND_ERR_MASK, 0);
13714         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13715         /* SEND_ERR_FORCE read-only */
13716         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13717                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13718         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13719                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13720         for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13721                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13722         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13723                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13724         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13725                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13726         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13727         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13728         /* SEND_CM_CREDIT_USED_STATUS read-only */
13729         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13730         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13731         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13732         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13733         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13734         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13735                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13736         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13737         /* SEND_CM_CREDIT_USED_VL read-only */
13738         /* SEND_CM_CREDIT_USED_VL15 read-only */
13739         /* SEND_EGRESS_CTXT_STATUS read-only */
13740         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13741         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13742         /* SEND_EGRESS_ERR_INFO read-only */
13743         /* SEND_EGRESS_ERR_SOURCE read-only */
13744
13745         /*
13746          * TXE Per-Context CSRs
13747          */
13748         for (i = 0; i < chip_send_contexts(dd); i++) {
13749                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13750                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13751                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13752                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13753                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13754                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13755                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13756                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13757                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13758                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13759                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13760                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13761         }
13762
13763         /*
13764          * TXE Per-SDMA CSRs
13765          */
13766         for (i = 0; i < chip_sdma_engines(dd); i++) {
13767                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13768                 /* SEND_DMA_STATUS read-only */
13769                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13770                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13771                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13772                 /* SEND_DMA_HEAD read-only */
13773                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13774                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13775                 /* SEND_DMA_IDLE_CNT read-only */
13776                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13777                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13778                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13779                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13780                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13781                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13782                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13783                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13784                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13785                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13786                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13787                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13788                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13789                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13790         }
13791 }
13792
13793 /*
13794  * Expect on entry:
13795  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13796  */
13797 static void init_rbufs(struct hfi1_devdata *dd)
13798 {
13799         u64 reg;
13800         int count;
13801
13802         /*
13803          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13804          * clear.
13805          */
13806         count = 0;
13807         while (1) {
13808                 reg = read_csr(dd, RCV_STATUS);
13809                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13810                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13811                         break;
13812                 /*
13813                  * Give up after 1ms - maximum wait time.
13814                  *
13815                  * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13816                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13817                  *      136 KB / (66% * 250MB/s) = 844us
13818                  */
13819                 if (count++ > 500) {
13820                         dd_dev_err(dd,
13821                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13822                                    __func__, reg);
13823                         break;
13824                 }
13825                 udelay(2); /* do not busy-wait the CSR */
13826         }
13827
13828         /* start the init - expect RcvCtrl to be 0 */
13829         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13830
13831         /*
13832          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13833          * period after the write before RcvStatus.RxRbufInitDone is valid.
13834          * The delay in the first run through the loop below is sufficient and
13835          * required before the first read of RcvStatus.RxRbufInitDone.
13836          */
13837         read_csr(dd, RCV_CTRL);
13838
13839         /* wait for the init to finish */
13840         count = 0;
13841         while (1) {
13842                 /* delay is required first time through - see above */
13843                 udelay(2); /* do not busy-wait the CSR */
13844                 reg = read_csr(dd, RCV_STATUS);
13845                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13846                         break;
13847
13848                 /* give up after 100us - slowest possible at 33MHz is 73us */
13849                 if (count++ > 50) {
13850                         dd_dev_err(dd,
13851                                    "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
13852                                    __func__);
13853                         break;
13854                 }
13855         }
13856 }
13857
13858 /* set RXE CSRs to chip reset defaults */
13859 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13860 {
13861         int i, j;
13862
13863         /*
13864          * RXE Kernel CSRs
13865          */
13866         write_csr(dd, RCV_CTRL, 0);
13867         init_rbufs(dd);
13868         /* RCV_STATUS read-only */
13869         /* RCV_CONTEXTS read-only */
13870         /* RCV_ARRAY_CNT read-only */
13871         /* RCV_BUF_SIZE read-only */
13872         write_csr(dd, RCV_BTH_QP, 0);
13873         write_csr(dd, RCV_MULTICAST, 0);
13874         write_csr(dd, RCV_BYPASS, 0);
13875         write_csr(dd, RCV_VL15, 0);
13876         /* this is a clear-down */
13877         write_csr(dd, RCV_ERR_INFO,
13878                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13879         /* RCV_ERR_STATUS read-only */
13880         write_csr(dd, RCV_ERR_MASK, 0);
13881         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13882         /* RCV_ERR_FORCE leave alone */
13883         for (i = 0; i < 32; i++)
13884                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13885         for (i = 0; i < 4; i++)
13886                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13887         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13888                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13889         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13890                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13891         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13892                 clear_rsm_rule(dd, i);
13893         for (i = 0; i < 32; i++)
13894                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13895
13896         /*
13897          * RXE Kernel and User Per-Context CSRs
13898          */
13899         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13900                 /* kernel */
13901                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13902                 /* RCV_CTXT_STATUS read-only */
13903                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13904                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13905                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13906                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13907                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13908                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13909                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13910                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13911                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13912                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13913
13914                 /* user */
13915                 /* RCV_HDR_TAIL read-only */
13916                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13917                 /* RCV_EGR_INDEX_TAIL read-only */
13918                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13919                 /* RCV_EGR_OFFSET_TAIL read-only */
13920                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13921                         write_uctxt_csr(dd, i,
13922                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
13923                 }
13924         }
13925 }
13926
13927 /*
13928  * Set sc2vl tables.
13929  *
13930  * They power on to zeros, so to avoid send context errors
13931  * they need to be set:
13932  *
13933  * SC 0-7 -> VL 0-7 (respectively)
13934  * SC 15  -> VL 15
13935  * otherwise
13936  *        -> VL 0
13937  */
13938 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13939 {
13940         int i;
13941         /* init per architecture spec, constrained by hardware capability */
13942
13943         /* HFI maps sent packets */
13944         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13945                 0,
13946                 0, 0, 1, 1,
13947                 2, 2, 3, 3,
13948                 4, 4, 5, 5,
13949                 6, 6, 7, 7));
13950         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13951                 1,
13952                 8, 0, 9, 0,
13953                 10, 0, 11, 0,
13954                 12, 0, 13, 0,
13955                 14, 0, 15, 15));
13956         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13957                 2,
13958                 16, 0, 17, 0,
13959                 18, 0, 19, 0,
13960                 20, 0, 21, 0,
13961                 22, 0, 23, 0));
13962         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13963                 3,
13964                 24, 0, 25, 0,
13965                 26, 0, 27, 0,
13966                 28, 0, 29, 0,
13967                 30, 0, 31, 0));
13968
13969         /* DC maps received packets */
13970         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13971                 15_0,
13972                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13973                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13974         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13975                 31_16,
13976                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13977                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13978
13979         /* initialize the cached sc2vl values consistently with h/w */
13980         for (i = 0; i < 32; i++) {
13981                 if (i < 8 || i == 15)
13982                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13983                 else
13984                         *((u8 *)(dd->sc2vl) + i) = 0;
13985         }
13986 }
13987
13988 /*
13989  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13990  * depend on the chip going through a power-on reset - a driver may be loaded
13991  * and unloaded many times.
13992  *
13993  * Do not write any CSR values to the chip in this routine - there may be
13994  * a reset following the (possible) FLR in this routine.
13995  *
13996  */
13997 static int init_chip(struct hfi1_devdata *dd)
13998 {
13999         int i;
14000         int ret = 0;
14001
14002         /*
14003          * Put the HFI CSRs in a known state.
14004          * Combine this with a DC reset.
14005          *
14006          * Stop the device from doing anything while we do a
14007          * reset.  We know there are no other active users of
14008          * the device since we are now in charge.  Turn off
14009          * off all outbound and inbound traffic and make sure
14010          * all outbound and inbound traffic and make sure
14011          */
14012
14013         /* disable send contexts and SDMA engines */
14014         write_csr(dd, SEND_CTRL, 0);
14015         for (i = 0; i < chip_send_contexts(dd); i++)
14016                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14017         for (i = 0; i < chip_sdma_engines(dd); i++)
14018                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14019         /* disable port (turn off RXE inbound traffic) and contexts */
14020         write_csr(dd, RCV_CTRL, 0);
14021         for (i = 0; i < chip_rcv_contexts(dd); i++)
14022                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
14023         /* mask all interrupt sources */
14024         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14025                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14026
14027         /*
14028          * DC Reset: do a full DC reset before the register clear.
14029          * A recommended length of time to hold is one CSR read,
14030          * so reread the CceDcCtrl.  Then, hold the DC in reset
14031          * across the clear.
14032          */
14033         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14034         (void)read_csr(dd, CCE_DC_CTRL);
14035
14036         if (use_flr) {
14037                 /*
14038                  * A FLR will reset the SPC core and part of the PCIe.
14039                  * The parts that need to be restored have already been
14040                  * saved.
14041                  */
14042                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
14043
14044                 /* do the FLR, the DC reset will remain */
14045                 pcie_flr(dd->pcidev);
14046
14047                 /* restore command and BARs */
14048                 ret = restore_pci_variables(dd);
14049                 if (ret) {
14050                         dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14051                                    __func__);
14052                         return ret;
14053                 }
14054
14055                 if (is_ax(dd)) {
14056                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
14057                         pcie_flr(dd->pcidev);
14058                         ret = restore_pci_variables(dd);
14059                         if (ret) {
14060                                 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14061                                            __func__);
14062                                 return ret;
14063                         }
14064                 }
14065         } else {
14066                 dd_dev_info(dd, "Resetting CSRs with writes\n");
14067                 reset_cce_csrs(dd);
14068                 reset_txe_csrs(dd);
14069                 reset_rxe_csrs(dd);
14070                 reset_misc_csrs(dd);
14071         }
14072         /* clear the DC reset */
14073         write_csr(dd, CCE_DC_CTRL, 0);
14074
14075         /* Set the LED off */
14076         setextled(dd, 0);
14077
14078         /*
14079          * Clear the QSFP reset.
14080          * An FLR enforces a 0 on all out pins. The driver does not touch
14081          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
14082          * anything plugged in held constantly in reset, if it pays attention
14083          * to RESET_N.
14084          * Prime examples of this are optical cables. Set all pins high.
14085          * I2CCLK and I2CDAT will change per direction, and INT_N and
14086          * MODPRS_N are input only and their value is ignored.
14087          */
14088         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14089         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14090         init_chip_resources(dd);
14091         return ret;
14092 }
14093
14094 static void init_early_variables(struct hfi1_devdata *dd)
14095 {
14096         int i;
14097
14098         /* assign link credit variables */
14099         dd->vau = CM_VAU;
14100         dd->link_credits = CM_GLOBAL_CREDITS;
14101         if (is_ax(dd))
14102                 dd->link_credits--;
14103         dd->vcu = cu_to_vcu(hfi1_cu);
14104         /* enough room for 8 MAD packets plus header - 17K */
14105         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14106         if (dd->vl15_init > dd->link_credits)
14107                 dd->vl15_init = dd->link_credits;
14108
14109         write_uninitialized_csrs_and_memories(dd);
14110
14111         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14112                 for (i = 0; i < dd->num_pports; i++) {
14113                         struct hfi1_pportdata *ppd = &dd->pport[i];
14114
14115                         set_partition_keys(ppd);
14116                 }
14117         init_sc2vl_tables(dd);
14118 }
14119
14120 static void init_kdeth_qp(struct hfi1_devdata *dd)
14121 {
14122         /* user changed the KDETH_QP */
14123         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14124                 /* out of range or illegal value */
14125                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14126                 kdeth_qp = 0;
14127         }
14128         if (kdeth_qp == 0)      /* not set, or failed range check */
14129                 kdeth_qp = DEFAULT_KDETH_QP;
14130
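              /* program the prefix into both the send and receive BTH QP CSRs */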
14131         write_csr(dd, SEND_BTH_QP,
14132                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14133                   SEND_BTH_QP_KDETH_QP_SHIFT);
14134
14135         write_csr(dd, RCV_BTH_QP,
14136                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14137                   RCV_BTH_QP_KDETH_QP_SHIFT);
14138 }
14139
14140 /**
14141  * hfi1_get_qp_map
14142  * @dd: device data
14143  * @idx: index to read
14144  */
14145 u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
14146 {
14147         u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
14148
14149         reg >>= (idx % 8) * 8;
14150         return reg;
14151 }
14152
14153 /**
14154  * init_qpmap_table
14155  * @dd - device data
14156  * @first_ctxt - first context
14157  * @last_ctxt - last context
14158  *
14159  * This routine sets the qpn mapping table that
14160  * is indexed by qpn[8:1].
14161  *
14162  * The routine will round robin the 256 settings
14163  * from first_ctxt to last_ctxt.
14164  *
14165  * The first/last looks ahead to having specialized
14166  * receive contexts for mgmt and bypass.  Normal
14167  * verbs traffic will be assumed to be on a range
14168  * of receive contexts.
14169  */
14170 static void init_qpmap_table(struct hfi1_devdata *dd,
14171                              u32 first_ctxt,
14172                              u32 last_ctxt)
14173 {
14174         u64 reg = 0;
14175         u64 regno = RCV_QP_MAP_TABLE;
14176         int i;
14177         u64 ctxt = first_ctxt;
14178
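              /*
               * Pack eight 8-bit context numbers per RcvQPMapTable CSR,
               * round-robining first_ctxt..last_ctxt over all 256 entries.
               */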
14179         for (i = 0; i < 256; i++) {
14180                 reg |= ctxt << (8 * (i % 8));
14181                 ctxt++;
14182                 if (ctxt > last_ctxt)
14183                         ctxt = first_ctxt;
14184                 if (i % 8 == 7) {
14185                         write_csr(dd, regno, reg);
14186                         reg = 0;
14187                         regno += 8;
14188                 }
14189         }
14190
14191         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14192                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14193 }
14194
14195 struct rsm_map_table {
14196         u64 map[NUM_MAP_REGS];
14197         unsigned int used;
14198 };
14199
14200 struct rsm_rule_data {
14201         u8 offset;
14202         u8 pkt_type;
14203         u32 field1_off;
14204         u32 field2_off;
14205         u32 index1_off;
14206         u32 index1_width;
14207         u32 index2_off;
14208         u32 index2_width;
14209         u32 mask1;
14210         u32 value1;
14211         u32 mask2;
14212         u32 value2;
14213 };
14214
14215 /*
14216  * Return an initialized RMT map table for users to fill in.  OK if it
14217  * returns NULL, indicating no table.
14218  */
14219 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14220 {
14221         struct rsm_map_table *rmt;
14222         u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14223
14224         rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14225         if (rmt) {
14226                 memset(rmt->map, rxcontext, sizeof(rmt->map));
14227                 rmt->used = 0;
14228         }
14229
14230         return rmt;
14231 }
14232
14233 /*
14234  * Write the final RMT map table to the chip and free the table.  OK if
14235  * table is NULL.
14236  */
14237 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14238                                    struct rsm_map_table *rmt)
14239 {
14240         int i;
14241
14242         if (rmt) {
14243                 /* write table to chip */
14244                 for (i = 0; i < NUM_MAP_REGS; i++)
14245                         write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14246
14247                 /* enable RSM */
14248                 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14249         }
14250 }
14251
14252 /*
14253  * Add a receive side mapping rule.
14254  */
14255 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14256                          struct rsm_rule_data *rrd)
14257 {
14258         write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14259                   (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14260                   1ull << rule_index | /* enable bit */
14261                   (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14262         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14263                   (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14264                   (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14265                   (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14266                   (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14267                   (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14268                   (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14269         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14270                   (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14271                   (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14272                   (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14273                   (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14274 }
14275
14276 /*
14277  * Clear a receive side mapping rule.
14278  */
14279 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14280 {
14281         write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14282         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14283         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14284 }
14285
14286 /* return the number of RSM map table entries that will be used for QOS */
14287 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14288                            unsigned int *np)
14289 {
14290         int i;
14291         unsigned int m, n;
14292         u8 max_by_vl = 0;
14293
14294         /* is QOS active at all? */
14295         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14296             num_vls == 1 ||
14297             krcvqsset <= 1)
14298                 goto no_qos;
14299
14300         /* determine bits for qpn */
14301         for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14302                 if (krcvqs[i] > max_by_vl)
14303                         max_by_vl = krcvqs[i];
14304         if (max_by_vl > 32)
14305                 goto no_qos;
14306         m = ilog2(__roundup_pow_of_two(max_by_vl));
14307
14308         /* determine bits for vl */
14309         n = ilog2(__roundup_pow_of_two(num_vls));
14310
14311         /* reject if too much is used */
14312         if ((m + n) > 7)
14313                 goto no_qos;
14314
14315         if (mp)
14316                 *mp = m;
14317         if (np)
14318                 *np = n;
14319
14320         return 1 << (m + n);
14321
14322 no_qos:
14323         if (mp)
14324                 *mp = 0;
14325         if (np)
14326                 *np = 0;
14327         return 0;
14328 }
14329
14330 /**
14331  * init_qos - init RX qos
14332  * @dd - device data
14333  * @rmt - RSM map table
14334  *
14335  * This routine initializes Rule 0 and the RSM map table to implement
14336  * quality of service (qos).
14337  *
14338  * If all of the limit tests succeed, qos is applied based on the array
14339  * interpretation of krcvqs where entry 0 is VL0.
14340  *
14341  * The number of vl bits (n) and the number of qpn bits (m) are computed to
14342  * feed both the RSM map table and the single rule.
14343  */
14344 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14345 {
14346         struct rsm_rule_data rrd;
14347         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14348         unsigned int rmt_entries;
14349         u64 reg;
14350
14351         if (!rmt)
14352                 goto bail;
14353         rmt_entries = qos_rmt_entries(dd, &m, &n);
14354         if (rmt_entries == 0)
14355                 goto bail;
14356         qpns_per_vl = 1 << m;
14357
14358         /* enough room in the map table? */
14359         rmt_entries = 1 << (m + n);
14360         if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14361                 goto bail;
14362
14363         /* add qos entries to the RSM map table */
14364         for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14365                 unsigned tctxt;
14366
14367                 for (qpn = 0, tctxt = ctxt;
14368                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14369                         unsigned idx, regoff, regidx;
14370
14371                         /* generate the index the hardware will produce */
14372                         idx = rmt->used + ((qpn << n) ^ i);
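                        /*
                         * e.g. (illustrative values): with rmt->used = 0,
                         * n = 2, vl i = 1 and qpn = 2 this yields
                         * idx = (2 << 2) ^ 1 = 9.
                         */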
14373                         regoff = (idx % 8) * 8;
14374                         regidx = idx / 8;
14375                         /* replace default with context number */
14376                         reg = rmt->map[regidx];
14377                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14378                                 << regoff);
14379                         reg |= (u64)(tctxt++) << regoff;
14380                         rmt->map[regidx] = reg;
14381                         if (tctxt == ctxt + krcvqs[i])
14382                                 tctxt = ctxt;
14383                 }
14384                 ctxt += krcvqs[i];
14385         }
14386
14387         rrd.offset = rmt->used;
14388         rrd.pkt_type = 2;
14389         rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14390         rrd.field2_off = LRH_SC_MATCH_OFFSET;
14391         rrd.index1_off = LRH_SC_SELECT_OFFSET;
14392         rrd.index1_width = n;
14393         rrd.index2_off = QPN_SELECT_OFFSET;
14394         rrd.index2_width = m + n;
14395         rrd.mask1 = LRH_BTH_MASK;
14396         rrd.value1 = LRH_BTH_VALUE;
14397         rrd.mask2 = LRH_SC_MASK;
14398         rrd.value2 = LRH_SC_VALUE;
14399
14400         /* add rule 0 */
14401         add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14402
14403         /* mark RSM map entries as used */
14404         rmt->used += rmt_entries;
14405         /* map everything else to the mcast/err/vl15 context */
14406         init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14407         dd->qos_shift = n + 1;
14408         return;
14409 bail:
14410         dd->qos_shift = 1;
14411         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14412 }
14413
14414 static void init_fecn_handling(struct hfi1_devdata *dd,
14415                                struct rsm_map_table *rmt)
14416 {
14417         struct rsm_rule_data rrd;
14418         u64 reg;
14419         int i, idx, regoff, regidx, start;
14420         u8 offset;
14421         u32 total_cnt;
14422
14423         if (HFI1_CAP_IS_KSET(TID_RDMA))
14424                 /* Exclude context 0 */
14425                 start = 1;
14426         else
14427                 start = dd->first_dyn_alloc_ctxt;
14428
14429         total_cnt = dd->num_rcv_contexts - start;
14430
14431         /* there needs to be enough room in the map table */
14432         if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14433                 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
14434                 return;
14435         }
14436
14437         /*
14438          * RSM will extract the destination context as an index into the
14439          * map table.  The destination contexts are a sequential block
14440          * in the range start...num_rcv_contexts-1 (inclusive).
14441          * Map entries are accessed as offset + extracted value.  Adjust
14442          * the added offset so this sequence can be placed anywhere in
14443          * the table - as long as the entries themselves do not wrap.
14444          * There are only enough bits in offset for the table size, so
14445          * start with that to allow for a "negative" offset.
14446          */
14447         offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
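        /*
         * Illustrative example (assuming NUM_MAP_ENTRIES == 256 and
         * hypothetical values rmt->used == 8, start == 1): offset becomes
         * (u8)(256 + 8 - 1) = 7, so extracted context i selects map entry
         * (7 + i) mod 256, placing context 'start' at entry rmt->used.
         */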
14448
14449         for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
14450              i++, idx++) {
14451                 /* replace with identity mapping */
14452                 regoff = (idx % 8) * 8;
14453                 regidx = idx / 8;
14454                 reg = rmt->map[regidx];
14455                 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14456                 reg |= (u64)i << regoff;
14457                 rmt->map[regidx] = reg;
14458         }
14459
14460         /*
14461          * For RSM intercept of Expected FECN packets:
14462          * o packet type 0 - expected
14463          * o match on F (bit 95), using select/match 1, and
14464          * o match on SH (bit 133), using select/match 2.
14465          *
14466          * Use index 1 to extract the 8-bit receive context from DestQP
14467          * (start at bit 64).  Use that as the RSM map table index.
14468          */
14469         rrd.offset = offset;
14470         rrd.pkt_type = 0;
14471         rrd.field1_off = 95;
14472         rrd.field2_off = 133;
14473         rrd.index1_off = 64;
14474         rrd.index1_width = 8;
14475         rrd.index2_off = 0;
14476         rrd.index2_width = 0;
14477         rrd.mask1 = 1;
14478         rrd.value1 = 1;
14479         rrd.mask2 = 1;
14480         rrd.value2 = 1;
14481
14482         /* add rule 1 */
14483         add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14484
14485         rmt->used += total_cnt;
14486 }
14487
14488 /* Initialize RSM for VNIC */
14489 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14490 {
14491         u8 i, j;
14492         u8 ctx_id = 0;
14493         u64 reg;
14494         u32 regoff;
14495         struct rsm_rule_data rrd;
14496
14497         if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14498                 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14499                            dd->vnic.rmt_start);
14500                 return;
14501         }
14502
14503         dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14504                 dd->vnic.rmt_start,
14505                 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14506
14507         /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14508         regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
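        /*
         * Each 64-bit map register holds 8 one-byte entries, so e.g. a
         * hypothetical rmt_start of 20 begins in map register 2 (CSR
         * offset RCV_RSM_MAP_TABLE + 16) at byte lane j = 4.
         */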
14509         reg = read_csr(dd, regoff);
14510         for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14511                 /* Update map register with vnic context */
14512                 j = (dd->vnic.rmt_start + i) % 8;
14513                 reg &= ~(0xffllu << (j * 8));
14514                 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14515                 /* Wrap up vnic ctx index */
14516                 ctx_id %= dd->vnic.num_ctxt;
14517                 /* Write back map register */
14518                 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14519                         dev_dbg(&(dd)->pcidev->dev,
14520                                 "Vnic rsm map reg[%d] =0x%llx\n",
14521                                 regoff - RCV_RSM_MAP_TABLE, reg);
14522
14523                         write_csr(dd, regoff, reg);
14524                         regoff += 8;
14525                         if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14526                                 reg = read_csr(dd, regoff);
14527                 }
14528         }
14529
14530         /* Add rule for vnic */
14531         rrd.offset = dd->vnic.rmt_start;
14532         rrd.pkt_type = 4;
14533         /* Match 16B packets */
14534         rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14535         rrd.mask1 = L2_TYPE_MASK;
14536         rrd.value1 = L2_16B_VALUE;
14537         /* Match ETH L4 packets */
14538         rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14539         rrd.mask2 = L4_16B_TYPE_MASK;
14540         rrd.value2 = L4_16B_ETH_VALUE;
14541         /* Calc context from veswid and entropy */
14542         rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14543         rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14544         rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14545         rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14546         add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14547
14548         /* Enable RSM if not already enabled */
14549         add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14550 }
14551
14552 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14553 {
14554         clear_rsm_rule(dd, RSM_INS_VNIC);
14555
14556         /* Disable RSM if used only by vnic */
14557         if (dd->vnic.rmt_start == 0)
14558                 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14559 }
14560
14561 static int init_rxe(struct hfi1_devdata *dd)
14562 {
14563         struct rsm_map_table *rmt;
14564         u64 val;
14565
14566         /* enable all receive errors */
14567         write_csr(dd, RCV_ERR_MASK, ~0ull);
14568
14569         rmt = alloc_rsm_map_table(dd);
14570         if (!rmt)
14571                 return -ENOMEM;
14572
14573         /* set up QOS, including the QPN map table */
14574         init_qos(dd, rmt);
14575         init_fecn_handling(dd, rmt);
14576         complete_rsm_map_table(dd, rmt);
14577         /* record number of used rsm map entries for vnic */
14578         dd->vnic.rmt_start = rmt->used;
14579         kfree(rmt);
14580
14581         /*
14582          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14583          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14584          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14585          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14586          * Max_PayLoad_Size set to its minimum of 128.
14587          * Max_Payload_Size set to its minimum of 128.
14588          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14589          * (64 bytes).  Max_Payload_Size is possibly modified upward in
14590          * tune_pcie_caps() which is called after this routine.
14591          */
14592
14593         /* Have 16 bytes (4DW) of bypass header available in header queue */
14594         val = read_csr(dd, RCV_BYPASS);
14595         val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14596         val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14597                 RCV_BYPASS_HDR_SIZE_SHIFT);
14598         write_csr(dd, RCV_BYPASS, val);
14599         return 0;
14600 }
14601
14602 static void init_other(struct hfi1_devdata *dd)
14603 {
14604         /* enable all CCE errors */
14605         write_csr(dd, CCE_ERR_MASK, ~0ull);
14606         /* enable *some* Misc errors */
14607         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14608         /* enable all DC errors, except LCB */
14609         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14610         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14611 }
14612
14613 /*
14614  * Fill out the given AU table using the given CU.  A CU is defined in terms
14615  * of AUs.  The table is an encoding: given the index, how many AUs does that
14616  * index represent?
14617  *
14618  * NOTE: Assumes that the register layout is the same for the
14619  * local and remote tables.
14620  */
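/*
 * For example (reading the shifts below), with cu == 2 the eight table
 * entries encode {0, 1, 4, 8, 16, 32, 64, 128} AUs for indexes 0 through 7.
 */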
14621 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14622                                u32 csr0to3, u32 csr4to7)
14623 {
14624         write_csr(dd, csr0to3,
14625                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14626                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14627                   2ull * cu <<
14628                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14629                   4ull * cu <<
14630                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14631         write_csr(dd, csr4to7,
14632                   8ull * cu <<
14633                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14634                   16ull * cu <<
14635                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14636                   32ull * cu <<
14637                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14638                   64ull * cu <<
14639                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14640 }
14641
14642 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14643 {
14644         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14645                            SEND_CM_LOCAL_AU_TABLE4_TO7);
14646 }
14647
14648 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14649 {
14650         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14651                            SEND_CM_REMOTE_AU_TABLE4_TO7);
14652 }
14653
14654 static void init_txe(struct hfi1_devdata *dd)
14655 {
14656         int i;
14657
14658         /* enable all PIO, SDMA, general, and Egress errors */
14659         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14660         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14661         write_csr(dd, SEND_ERR_MASK, ~0ull);
14662         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14663
14664         /* enable all per-context and per-SDMA engine errors */
14665         for (i = 0; i < chip_send_contexts(dd); i++)
14666                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14667         for (i = 0; i < chip_sdma_engines(dd); i++)
14668                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14669
14670         /* set the local CU to AU mapping */
14671         assign_local_cm_au_table(dd, dd->vcu);
14672
14673         /*
14674          * Set reasonable default for Credit Return Timer
14675          * Don't set on Simulator - causes it to choke.
14676          */
14677         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14678                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14679 }
14680
14681 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14682                        u16 jkey)
14683 {
14684         u8 hw_ctxt;
14685         u64 reg;
14686
14687         if (!rcd || !rcd->sc)
14688                 return -EINVAL;
14689
14690         hw_ctxt = rcd->sc->hw_context;
14691         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14692                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14693                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14694         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14695         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14696                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14697         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14698         /*
14699          * Enable send-side J_KEY integrity check, unless this is A0 h/w
14700          */
14701         if (!is_ax(dd)) {
14702                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14703                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14704                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14705         }
14706
14707         /* Enable J_KEY check on receive context. */
14708         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14709                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14710                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14711         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14712
14713         return 0;
14714 }
14715
14716 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14717 {
14718         u8 hw_ctxt;
14719         u64 reg;
14720
14721         if (!rcd || !rcd->sc)
14722                 return -EINVAL;
14723
14724         hw_ctxt = rcd->sc->hw_context;
14725         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14726         /*
14727          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14728          * This check would not have been enabled for A0 h/w, see
14729          * set_ctxt_jkey().
14730          */
14731         if (!is_ax(dd)) {
14732                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14733                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14734                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14735         }
14736         /* Turn off the J_KEY on the receive side */
14737         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14738
14739         return 0;
14740 }
14741
14742 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14743                        u16 pkey)
14744 {
14745         u8 hw_ctxt;
14746         u64 reg;
14747
14748         if (!rcd || !rcd->sc)
14749                 return -EINVAL;
14750
14751         hw_ctxt = rcd->sc->hw_context;
14752         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14753                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14754         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14755         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14756         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14757         reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14758         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14759
14760         return 0;
14761 }
14762
14763 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14764 {
14765         u8 hw_ctxt;
14766         u64 reg;
14767
14768         if (!ctxt || !ctxt->sc)
14769                 return -EINVAL;
14770
14771         hw_ctxt = ctxt->sc->hw_context;
14772         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14773         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14774         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14775         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14776
14777         return 0;
14778 }
14779
14780 /*
14781  * Start doing the clean up of the chip. Our clean up happens in multiple
14782  * stages and this is just the first.
14783  */
14784 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14785 {
14786         aspm_exit(dd);
14787         free_cntrs(dd);
14788         free_rcverr(dd);
14789         finish_chip_resources(dd);
14790 }
14791
14792 #define HFI_BASE_GUID(dev) \
14793         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14794
14795 /*
14796  * Information can be shared between the two HFIs on the same ASIC
14797  * in the same OS.  This function finds the peer device and sets
14798  * up a shared structure.
14799  */
14800 static int init_asic_data(struct hfi1_devdata *dd)
14801 {
14802         unsigned long index;
14803         struct hfi1_devdata *peer;
14804         struct hfi1_asic_data *asic_data;
14805         int ret = 0;
14806
14807         /* pre-allocate the asic structure in case we are the first device */
14808         asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14809         if (!asic_data)
14810                 return -ENOMEM;
14811
14812         xa_lock_irq(&hfi1_dev_table);
14813         /* Find our peer device */
14814         xa_for_each(&hfi1_dev_table, index, peer) {
14815                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
14816                     dd->unit != peer->unit)
14817                         break;
14818         }
14819
14820         if (peer) {
14821                 /* use already allocated structure */
14822                 dd->asic_data = peer->asic_data;
14823                 kfree(asic_data);
14824         } else {
14825                 dd->asic_data = asic_data;
14826                 mutex_init(&dd->asic_data->asic_resource_mutex);
14827         }
14828         dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14829         xa_unlock_irq(&hfi1_dev_table);
14830
14831         /* first one through - set up i2c devices */
14832         if (!peer)
14833                 ret = set_up_i2c(dd, dd->asic_data);
14834
14835         return ret;
14836 }
14837
14838 /*
14839  * Set dd->boardname.  Use a generic name if a name is not returned from
14840  * EFI variable space.
14841  *
14842  * Return 0 on success, -ENOMEM if space could not be allocated.
14843  */
14844 static int obtain_boardname(struct hfi1_devdata *dd)
14845 {
14846         /* generic board description */
14847         const char generic[] =
14848                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14849         unsigned long size;
14850         int ret;
14851
14852         ret = read_hfi1_efi_var(dd, "description", &size,
14853                                 (void **)&dd->boardname);
14854         if (ret) {
14855                 dd_dev_info(dd, "Board description not found\n");
14856                 /* use generic description */
14857                 dd->boardname = kstrdup(generic, GFP_KERNEL);
14858                 if (!dd->boardname)
14859                         return -ENOMEM;
14860         }
14861         return 0;
14862 }
14863
14864 /*
14865  * Check the interrupt registers to make sure that they are mapped correctly.
14866  * It is intended to help the user identify any mismapping by the VMM when
14867  * the driver is running in a VM. This function should only be called before
14868  * interrupts are set up properly.
14869  *
14870  * Return 0 on success, -EINVAL on failure.
14871  */
14872 static int check_int_registers(struct hfi1_devdata *dd)
14873 {
14874         u64 reg;
14875         u64 all_bits = ~(u64)0;
14876         u64 mask;
14877
14878         /* Clear CceIntMask[0] to avoid raising any interrupts */
14879         mask = read_csr(dd, CCE_INT_MASK);
14880         write_csr(dd, CCE_INT_MASK, 0ull);
14881         reg = read_csr(dd, CCE_INT_MASK);
14882         if (reg)
14883                 goto err_exit;
14884
14885         /* Clear all interrupt status bits */
14886         write_csr(dd, CCE_INT_CLEAR, all_bits);
14887         reg = read_csr(dd, CCE_INT_STATUS);
14888         if (reg)
14889                 goto err_exit;
14890
14891         /* Set all interrupt status bits */
14892         write_csr(dd, CCE_INT_FORCE, all_bits);
14893         reg = read_csr(dd, CCE_INT_STATUS);
14894         if (reg != all_bits)
14895                 goto err_exit;
14896
14897         /* Restore the interrupt mask */
14898         write_csr(dd, CCE_INT_CLEAR, all_bits);
14899         write_csr(dd, CCE_INT_MASK, mask);
14900
14901         return 0;
14902 err_exit:
14903         write_csr(dd, CCE_INT_MASK, mask);
14904         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14905         return -EINVAL;
14906 }
14907
14908 /**
14909  * hfi1_init_dd() - Initialize most of the dd structure.
14910  * @dd: the hfi1_devdata structure for this device
14912  *
14913  * This is global, and is called directly at init to set up the
14914  * chip-specific function pointers for later use.
14915  */
14916 int hfi1_init_dd(struct hfi1_devdata *dd)
14917 {
14918         struct pci_dev *pdev = dd->pcidev;
14919         struct hfi1_pportdata *ppd;
14920         u64 reg;
14921         int i, ret;
14922         static const char * const inames[] = { /* implementation names */
14923                 "RTL silicon",
14924                 "RTL VCS simulation",
14925                 "RTL FPGA emulation",
14926                 "Functional simulator"
14927         };
14928         struct pci_dev *parent = pdev->bus->self;
14929         u32 sdma_engines = chip_sdma_engines(dd);
14930
14931         ppd = dd->pport;
14932         for (i = 0; i < dd->num_pports; i++, ppd++) {
14933                 int vl;
14934                 /* init common fields */
14935                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14936                 /* DC supports 4 link widths */
14937                 ppd->link_width_supported =
14938                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14939                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14940                 ppd->link_width_downgrade_supported =
14941                         ppd->link_width_supported;
14942                 /* start out enabling only 4X */
14943                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14944                 ppd->link_width_downgrade_enabled =
14945                                         ppd->link_width_downgrade_supported;
14946                 /* link width active is 0 when link is down */
14947                 /* link width downgrade active is 0 when link is down */
14948
14949                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14950                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
14951                         dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14952                                    num_vls, HFI1_MAX_VLS_SUPPORTED);
14953                         num_vls = HFI1_MAX_VLS_SUPPORTED;
14954                 }
14955                 ppd->vls_supported = num_vls;
14956                 ppd->vls_operational = ppd->vls_supported;
14957                 /* Set the default MTU. */
14958                 for (vl = 0; vl < num_vls; vl++)
14959                         dd->vld[vl].mtu = hfi1_max_mtu;
14960                 dd->vld[15].mtu = MAX_MAD_PACKET;
14961                 /*
14962                  * Set the initial values to reasonable defaults; they will
14963                  * be set for real when the link is up.
14964                  */
14965                 ppd->overrun_threshold = 0x4;
14966                 ppd->phy_error_threshold = 0xf;
14967                 ppd->port_crc_mode_enabled = link_crc_mask;
14968                 /* initialize supported LTP CRC mode */
14969                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14970                 /* initialize enabled LTP CRC mode */
14971                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14972                 /* start in offline */
14973                 ppd->host_link_state = HLS_DN_OFFLINE;
14974                 init_vl_arb_caches(ppd);
14975         }
14976
14977         /*
14978          * Do remaining PCIe setup and save PCIe values in dd.
14979          * Any error printing is already done by the init code.
14980          * On return, we have the chip mapped.
14981          */
14982         ret = hfi1_pcie_ddinit(dd, pdev);
14983         if (ret < 0)
14984                 goto bail_free;
14985
14986         /* Save PCI space registers to rewrite after device reset */
14987         ret = save_pci_variables(dd);
14988         if (ret < 0)
14989                 goto bail_cleanup;
14990
14991         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14992                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14993         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14994                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
14995
14996         /*
14997          * Check interrupt registers mapping if the driver has no access to
14998          * the upstream component. In this case, it is likely that the driver
14999          * is running in a VM.
15000          */
15001         if (!parent) {
15002                 ret = check_int_registers(dd);
15003                 if (ret)
15004                         goto bail_cleanup;
15005         }
15006
15007         /*
15008          * obtain the hardware ID - NOT related to unit, which is a
15009          * software enumeration
15010          */
15011         reg = read_csr(dd, CCE_REVISION2);
15012         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15013                                         & CCE_REVISION2_HFI_ID_MASK;
15014         /* the variable size will remove unwanted bits */
15015         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15016         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15017         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15018                     dd->icode < ARRAY_SIZE(inames) ?
15019                     inames[dd->icode] : "unknown", (int)dd->irev);
15020
15021         /* speeds the hardware can support */
15022         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
15023         /* speeds allowed to run at */
15024         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
15025         /* give a reasonable active value, will be set on link up */
15026         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15027
15028         /* fix up link widths for emulation _p */
15029         ppd = dd->pport;
15030         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15031                 ppd->link_width_supported =
15032                         ppd->link_width_enabled =
15033                         ppd->link_width_downgrade_supported =
15034                         ppd->link_width_downgrade_enabled =
15035                                 OPA_LINK_WIDTH_1X;
15036         }
15037         /* ensure num_vls isn't larger than the number of sdma engines */
15038         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
15039                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15040                            num_vls, sdma_engines);
15041                 num_vls = sdma_engines;
15042                 ppd->vls_supported = sdma_engines;
15043                 ppd->vls_operational = ppd->vls_supported;
15044         }
15045
15046         /*
15047          * Convert the ns parameter to the 64 * cclocks used in the CSR.
15048          * Limit the max if larger than the field holds.  If timeout is
15049          * non-zero, then the calculated field will be at least 1.
15050          *
15051          * Must be after icode is set up - the cclock rate depends
15052          * on knowing the hardware being used.
15053          */
15054         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15055         if (dd->rcv_intr_timeout_csr >
15056                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15057                 dd->rcv_intr_timeout_csr =
15058                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15059         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15060                 dd->rcv_intr_timeout_csr = 1;
15061
15062         /* needs to be done before we look for the peer device */
15063         read_guid(dd);
15064
15065         /* set up shared ASIC data with peer device */
15066         ret = init_asic_data(dd);
15067         if (ret)
15068                 goto bail_cleanup;
15069
15070         /* obtain chip sizes, reset chip CSRs */
15071         ret = init_chip(dd);
15072         if (ret)
15073                 goto bail_cleanup;
15074
15075         /* read in the PCIe link speed information */
15076         ret = pcie_speeds(dd);
15077         if (ret)
15078                 goto bail_cleanup;
15079
15080         /* call before get_platform_config(), after init_chip_resources() */
15081         ret = eprom_init(dd);
15082         if (ret)
15083                 goto bail_free_rcverr;
15084
15085         /* Needs to be called before hfi1_firmware_init */
15086         get_platform_config(dd);
15087
15088         /* read in firmware */
15089         ret = hfi1_firmware_init(dd);
15090         if (ret)
15091                 goto bail_cleanup;
15092
15093         /*
15094          * In general, the PCIe Gen3 transition must occur after the
15095          * chip has been idled (so it won't initiate any PCIe transactions
15096          * e.g. an interrupt) and before the driver changes any registers
15097          * (the transition will reset the registers).
15098          *
15099          * In particular, place this call after:
15100          * - init_chip()     - the chip will not initiate any PCIe transactions
15101          * - pcie_speeds()   - reads the current link speed
15102          * - hfi1_firmware_init() - the needed firmware is ready to be
15103          *                          downloaded
15104          */
15105         ret = do_pcie_gen3_transition(dd);
15106         if (ret)
15107                 goto bail_cleanup;
15108
15109         /*
15110          * This should probably occur in hfi1_pcie_init(), but historically
15111          * occurs after the do_pcie_gen3_transition() code.
15112          */
15113         tune_pcie_caps(dd);
15114
15115         /* start setting dd values and adjusting CSRs */
15116         init_early_variables(dd);
15117
15118         parse_platform_config(dd);
15119
15120         ret = obtain_boardname(dd);
15121         if (ret)
15122                 goto bail_cleanup;
15123
15124         snprintf(dd->boardversion, BOARD_VERS_MAX,
15125                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15126                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15127                  (u32)dd->majrev,
15128                  (u32)dd->minrev,
15129                  (dd->revision >> CCE_REVISION_SW_SHIFT)
15130                     & CCE_REVISION_SW_MASK);
15131
15132         ret = set_up_context_variables(dd);
15133         if (ret)
15134                 goto bail_cleanup;
15135
15136         /* set initial RXE CSRs */
15137         ret = init_rxe(dd);
15138         if (ret)
15139                 goto bail_cleanup;
15140
15141         /* set initial TXE CSRs */
15142         init_txe(dd);
15143         /* set initial non-RXE, non-TXE CSRs */
15144         init_other(dd);
15145         /* set up KDETH QP prefix in both RX and TX CSRs */
15146         init_kdeth_qp(dd);
15147
15148         ret = hfi1_dev_affinity_init(dd);
15149         if (ret)
15150                 goto bail_cleanup;
15151
15152         /* send contexts must be set up before receive contexts */
15153         ret = init_send_contexts(dd);
15154         if (ret)
15155                 goto bail_cleanup;
15156
15157         ret = hfi1_create_kctxts(dd);
15158         if (ret)
15159                 goto bail_cleanup;
15160
15161         /*
15162          * Initialize aspm, to be done after gen3 transition and setting up
15163          * contexts and before enabling interrupts
15164          */
15165         aspm_init(dd);
15166
15167         ret = init_pervl_scs(dd);
15168         if (ret)
15169                 goto bail_cleanup;
15170
15171         /* sdma init */
15172         for (i = 0; i < dd->num_pports; ++i) {
15173                 ret = sdma_init(dd, i);
15174                 if (ret)
15175                         goto bail_cleanup;
15176         }
15177
15178         /* use contexts created by hfi1_create_kctxts */
15179         ret = set_up_interrupts(dd);
15180         if (ret)
15181                 goto bail_cleanup;
15182
15183         ret = hfi1_comp_vectors_set_up(dd);
15184         if (ret)
15185                 goto bail_clear_intr;
15186
15187         /* set up LCB access - must be after set_up_interrupts() */
15188         init_lcb_access(dd);
15189
15190         /*
15191          * Serial number is created from the base guid:
15192          * [27:24] = base guid [38:35]
15193          * [23: 0] = base guid [23: 0]
15194          */
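        /*
         * Illustrative example (hypothetical GUID): a base_guid of
         * 0x0011223344556677 produces the serial string "0x06556677"
         * (guid bits [38:35] = 0x6, bits [23:0] = 0x556677).
         */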
15195         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15196                  (dd->base_guid & 0xFFFFFF) |
15197                      ((dd->base_guid >> 11) & 0xF000000));
15198
15199         dd->oui1 = dd->base_guid >> 56 & 0xFF;
15200         dd->oui2 = dd->base_guid >> 48 & 0xFF;
15201         dd->oui3 = dd->base_guid >> 40 & 0xFF;
15202
15203         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15204         if (ret)
15205                 goto bail_clear_intr;
15206
15207         thermal_init(dd);
15208
15209         ret = init_cntrs(dd);
15210         if (ret)
15211                 goto bail_clear_intr;
15212
15213         ret = init_rcverr(dd);
15214         if (ret)
15215                 goto bail_free_cntrs;
15216
15217         init_completion(&dd->user_comp);
15218
15219         /* The user refcount starts with one to indicate an active device */
15220         atomic_set(&dd->user_refcount, 1);
15221
15222         goto bail;
15223
15224 bail_free_rcverr:
15225         free_rcverr(dd);
15226 bail_free_cntrs:
15227         free_cntrs(dd);
15228 bail_clear_intr:
15229         hfi1_comp_vectors_clean_up(dd);
15230         msix_clean_up_interrupts(dd);
15231 bail_cleanup:
15232         hfi1_pcie_ddcleanup(dd);
15233 bail_free:
15234         hfi1_free_devdata(dd);
15235 bail:
15236         return ret;
15237 }
15238
15239 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15240                         u32 dw_len)
15241 {
15242         u32 delta_cycles;
15243         u32 current_egress_rate = ppd->current_egress_rate;
15244         /* rates here are in units of 10^6 bits/sec */
15245
15246         if (desired_egress_rate == -1)
15247                 return 0; /* shouldn't happen */
15248
15249         if (desired_egress_rate >= current_egress_rate)
15250                 return 0; /* we can't help go faster, only slower */
15251
15252         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15253                         egress_cycles(dw_len * 4, current_egress_rate);
15254
15255         return (u16)delta_cycles;
15256 }
15257
15258 /**
15259  * create_pbc - build a pbc for transmission
15260  * @flags: special case flags or-ed in built pbc
15261  * @srate_mbs: static rate in Mb/s
15262  * @vl: vl
15263  * @dw_len: dword length (header words + data words + pbc words)
15264  *
15265  * Create a PBC with the given flags, rate, VL, and length.
15266  *
15267  * NOTE: The PBC created will not insert any HCRC - all callers but one are
15268  * for verbs, which does not use this PSM feature.  The lone other caller
15269  * is for the diagnostic interface which calls this if the user does not
15270  * supply their own PBC.
15271  */
15272 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15273                u32 dw_len)
15274 {
15275         u64 pbc, delay = 0;
15276
15277         if (unlikely(srate_mbs))
15278                 delay = delay_cycles(ppd, srate_mbs, dw_len);
15279
15280         pbc = flags
15281                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15282                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15283                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15284                 | (dw_len & PBC_LENGTH_DWS_MASK)
15285                         << PBC_LENGTH_DWS_SHIFT;
15286
15287         return pbc;
15288 }
15289
15290 #define SBUS_THERMAL    0x4f
15291 #define SBUS_THERM_MONITOR_MODE 0x1
15292
15293 #define THERM_FAILURE(dev, ret, reason) \
15294         dd_dev_err((dev),                                               \
15295                    "Thermal sensor initialization failed: %s (%d)\n",   \
15296                    (reason), (ret))
15297
15298 /*
15299  * Initialize the thermal sensor.
15300  *
15301  * After initialization, enable polling of thermal sensor through
15302  * SBus interface. In order for this to work, the SBus Master
15303  * firmware has to be loaded due to the fact that the HW polling
15304  * logic uses SBus interrupts, which are not supported with
15305  * default firmware. Otherwise, no data will be returned through
15306  * the ASIC_STS_THERM CSR.
15307  */
15308 static int thermal_init(struct hfi1_devdata *dd)
15309 {
15310         int ret = 0;
15311
15312         if (dd->icode != ICODE_RTL_SILICON ||
15313             check_chip_resource(dd, CR_THERM_INIT, NULL))
15314                 return ret;
15315
15316         ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15317         if (ret) {
15318                 THERM_FAILURE(dd, ret, "Acquire SBus");
15319                 return ret;
15320         }
15321
15322         dd_dev_info(dd, "Initializing thermal sensor\n");
15323         /* Disable polling of thermal readings */
15324         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15325         msleep(100);
15326         /* Thermal Sensor Initialization */
15327         /*    Step 1: Reset the Thermal SBus Receiver */
15328         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15329                                 RESET_SBUS_RECEIVER, 0);
15330         if (ret) {
15331                 THERM_FAILURE(dd, ret, "Bus Reset");
15332                 goto done;
15333         }
15334         /*    Step 2: Set Reset bit in Thermal block */
15335         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15336                                 WRITE_SBUS_RECEIVER, 0x1);
15337         if (ret) {
15338                 THERM_FAILURE(dd, ret, "Therm Block Reset");
15339                 goto done;
15340         }
15341         /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
15342         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15343                                 WRITE_SBUS_RECEIVER, 0x32);
15344         if (ret) {
15345                 THERM_FAILURE(dd, ret, "Write Clock Div");
15346                 goto done;
15347         }
15348         /*    Step 4: Select temperature mode */
15349         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15350                                 WRITE_SBUS_RECEIVER,
15351                                 SBUS_THERM_MONITOR_MODE);
15352         if (ret) {
15353                 THERM_FAILURE(dd, ret, "Write Mode Sel");
15354                 goto done;
15355         }
15356         /*    Step 5: De-assert block reset and start conversion */
15357         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15358                                 WRITE_SBUS_RECEIVER, 0x2);
15359         if (ret) {
15360                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15361                 goto done;
15362         }
15363         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15364         msleep(22);
15365
15366         /* Enable polling of thermal readings */
15367         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15368
15369         /* Set initialized flag */
15370         ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15371         if (ret)
15372                 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15373
15374 done:
15375         release_chip_resource(dd, CR_SBUS);
15376         return ret;
15377 }
15378
15379 static void handle_temp_err(struct hfi1_devdata *dd)
15380 {
15381         struct hfi1_pportdata *ppd = &dd->pport[0];
15382         /*
15383          * Thermal Critical Interrupt
15384          * Put the device into forced freeze mode, take link down to
15385          * offline, and put DC into reset.
15386          */
15387         dd_dev_emerg(dd,
15388                      "Critical temperature reached! Forcing device into freeze mode!\n");
15389         dd->flags |= HFI1_FORCED_FREEZE;
15390         start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15391         /*
15392          * Shut DC down as much and as quickly as possible.
15393          *
15394          * Step 1: Take the link down to OFFLINE. This will cause the
15395          *         8051 to put the Serdes in reset. However, we don't want to
15396          *         go through the entire link state machine since we want to
15397          *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15398          *         but rather an attempt to save the chip.
15399          *         Code below is almost the same as quiet_serdes() but avoids
15400          *         all the extra work and the sleeps.
15401          */
15402         ppd->driver_link_ready = 0;
15403         ppd->link_enabled = 0;
15404         set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15405                                 PLS_OFFLINE);
15406         /*
15407          * Step 2: Shutdown LCB and 8051
15408          *         After shutdown, do not restore DC_CFG_RESET value.
15409          */
15410         dc_shutdown(dd);
15411 }