drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2016-2017 Hisilicon Limited. */
3
4 #include "hclge_err.h"
5
6 static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
7         { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
8           .reset_level = HNAE3_NONE_RESET },
9         { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
10           .reset_level = HNAE3_NONE_RESET },
11         { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
12           .reset_level = HNAE3_NONE_RESET },
13         { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
14           .reset_level = HNAE3_NONE_RESET },
15         { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
16           .reset_level = HNAE3_NONE_RESET },
17         { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
18           .reset_level = HNAE3_NONE_RESET },
19         { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
20           .reset_level = HNAE3_NONE_RESET },
21         { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
22           .reset_level = HNAE3_NONE_RESET },
23         { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
24           .reset_level = HNAE3_NONE_RESET },
25         { /* sentinel */ }
26 };
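
/* Each hclge_hw_error table in this file uses the same layout: .int_msk
 * selects a bit (or, for the ROCEE QMM table, an encoded value) in the
 * corresponding hardware status word, .msg is the string logged when that
 * bit is set, and .reset_level is the recovery level requested for it.
 * Every array ends with an empty sentinel entry, i.e. one whose .msg is
 * NULL, which is what table walkers such as hclge_log_error() test for.
 */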
27
28 static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
29         { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
30           .reset_level = HNAE3_NONE_RESET },
31         { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
32           .reset_level = HNAE3_NONE_RESET },
33         { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
34           .reset_level = HNAE3_NONE_RESET },
35         { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
36           .reset_level = HNAE3_NONE_RESET },
37         { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
38           .reset_level = HNAE3_NONE_RESET },
39         { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
40           .reset_level = HNAE3_NONE_RESET },
41         { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
42           .reset_level = HNAE3_NONE_RESET },
43         { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
44           .reset_level = HNAE3_NONE_RESET },
45         { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
46           .reset_level = HNAE3_NONE_RESET },
47         { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
48           .reset_level = HNAE3_NONE_RESET },
49         { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
50           .reset_level = HNAE3_NONE_RESET },
51         { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
52           .reset_level = HNAE3_NONE_RESET },
53         { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
54           .reset_level = HNAE3_NONE_RESET },
55         { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
56           .reset_level = HNAE3_NONE_RESET },
57         { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
58           .reset_level = HNAE3_NONE_RESET },
59         { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
60           .reset_level = HNAE3_NONE_RESET },
61         { /* sentinel */ }
62 };
63
64 static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
65         { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
66           .reset_level = HNAE3_NONE_RESET },
67         { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
68           .reset_level = HNAE3_NONE_RESET },
69         { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
70           .reset_level = HNAE3_NONE_RESET },
71         { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
72           .reset_level = HNAE3_NONE_RESET },
73         { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
74           .reset_level = HNAE3_NONE_RESET },
75         { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
76           .reset_level = HNAE3_NONE_RESET },
77         { /* sentinel */ }
78 };
79
80 static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
81         { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
82           .reset_level = HNAE3_NONE_RESET },
83         { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
84           .reset_level = HNAE3_NONE_RESET },
85         { /* sentinel */ }
86 };
87
88 static const struct hclge_hw_error hclge_igu_int[] = {
89         { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
90           .reset_level = HNAE3_CORE_RESET },
91         { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
92           .reset_level = HNAE3_CORE_RESET },
93         { /* sentinel */ }
94 };
95
96 static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
97         { .int_msk = BIT(0), .msg = "rx_buf_overflow",
98           .reset_level = HNAE3_CORE_RESET },
99         { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
100           .reset_level = HNAE3_CORE_RESET },
101         { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
102           .reset_level = HNAE3_CORE_RESET },
103         { .int_msk = BIT(3), .msg = "tx_buf_overflow",
104           .reset_level = HNAE3_CORE_RESET },
105         { .int_msk = BIT(4), .msg = "tx_buf_underrun",
106           .reset_level = HNAE3_CORE_RESET },
107         { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
108           .reset_level = HNAE3_CORE_RESET },
109         { /* sentinel */ }
110 };
111
112 static const struct hclge_hw_error hclge_ncsi_err_int[] = {
113         { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
114           .reset_level = HNAE3_NONE_RESET },
115         { /* sentinel */ }
116 };
117
118 static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
119         { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
120           .reset_level = HNAE3_GLOBAL_RESET },
121         { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
122           .reset_level = HNAE3_GLOBAL_RESET },
123         { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
124           .reset_level = HNAE3_GLOBAL_RESET },
125         { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
126           .reset_level = HNAE3_GLOBAL_RESET },
127         { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
128           .reset_level = HNAE3_GLOBAL_RESET },
129         { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
130           .reset_level = HNAE3_GLOBAL_RESET },
131         { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
132           .reset_level = HNAE3_GLOBAL_RESET },
133         { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
134           .reset_level = HNAE3_GLOBAL_RESET },
135         { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
136           .reset_level = HNAE3_GLOBAL_RESET },
137         { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
138           .reset_level = HNAE3_GLOBAL_RESET },
139         { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
140           .reset_level = HNAE3_GLOBAL_RESET },
141         { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
142           .reset_level = HNAE3_GLOBAL_RESET },
143         { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
144           .reset_level = HNAE3_GLOBAL_RESET },
145         { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
146           .reset_level = HNAE3_GLOBAL_RESET },
147         { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
148           .reset_level = HNAE3_GLOBAL_RESET },
149         { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
150           .reset_level = HNAE3_GLOBAL_RESET },
151         { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
152           .reset_level = HNAE3_GLOBAL_RESET },
153         { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
154           .reset_level = HNAE3_GLOBAL_RESET },
155         { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
156           .reset_level = HNAE3_GLOBAL_RESET },
157         { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
158           .reset_level = HNAE3_GLOBAL_RESET },
159         { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
160           .reset_level = HNAE3_GLOBAL_RESET },
161         { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
162           .reset_level = HNAE3_GLOBAL_RESET },
163         { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
164           .reset_level = HNAE3_GLOBAL_RESET },
165         { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
166           .reset_level = HNAE3_GLOBAL_RESET },
167         { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
168           .reset_level = HNAE3_GLOBAL_RESET },
169         { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
170           .reset_level = HNAE3_GLOBAL_RESET },
171         { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
172           .reset_level = HNAE3_GLOBAL_RESET },
173         { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
174           .reset_level = HNAE3_GLOBAL_RESET },
175         { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
176           .reset_level = HNAE3_GLOBAL_RESET },
177         { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
178           .reset_level = HNAE3_GLOBAL_RESET },
179         { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
180           .reset_level = HNAE3_GLOBAL_RESET },
181         { /* sentinel */ }
182 };
183
184 static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
185         { .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
186           .reset_level = HNAE3_NONE_RESET },
187         { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
188           .reset_level = HNAE3_NONE_RESET },
189         { /* sentinel */ }
190 };
191
192 static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
193         { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
194           .reset_level = HNAE3_GLOBAL_RESET },
195         { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
196           .reset_level = HNAE3_GLOBAL_RESET },
197         { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
198           .reset_level = HNAE3_GLOBAL_RESET },
199         { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
200           .reset_level = HNAE3_GLOBAL_RESET },
201         { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
202           .reset_level = HNAE3_GLOBAL_RESET },
203         { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
204           .reset_level = HNAE3_GLOBAL_RESET },
205         { /* sentinel */ }
206 };
207
208 static const struct hclge_hw_error hclge_tm_sch_rint[] = {
209         { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
210           .reset_level = HNAE3_GLOBAL_RESET },
211         { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
212           .reset_level = HNAE3_GLOBAL_RESET },
213         { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
214           .reset_level = HNAE3_GLOBAL_RESET },
215         { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
216           .reset_level = HNAE3_GLOBAL_RESET },
217         { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
218           .reset_level = HNAE3_GLOBAL_RESET },
219         { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
220           .reset_level = HNAE3_GLOBAL_RESET },
221         { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
222           .reset_level = HNAE3_GLOBAL_RESET },
223         { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
224           .reset_level = HNAE3_GLOBAL_RESET },
225         { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
226           .reset_level = HNAE3_GLOBAL_RESET },
227         { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
228           .reset_level = HNAE3_GLOBAL_RESET },
229         { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
230           .reset_level = HNAE3_GLOBAL_RESET },
231         { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
232           .reset_level = HNAE3_GLOBAL_RESET },
233         { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
234           .reset_level = HNAE3_GLOBAL_RESET },
235         { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
236           .reset_level = HNAE3_GLOBAL_RESET },
237         { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
238           .reset_level = HNAE3_GLOBAL_RESET },
239         { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
240           .reset_level = HNAE3_GLOBAL_RESET },
241         { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
242           .reset_level = HNAE3_GLOBAL_RESET },
243         { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
244           .reset_level = HNAE3_GLOBAL_RESET },
245         { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
246           .reset_level = HNAE3_GLOBAL_RESET },
247         { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
248           .reset_level = HNAE3_GLOBAL_RESET },
249         { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
250           .reset_level = HNAE3_GLOBAL_RESET },
251         { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
252           .reset_level = HNAE3_GLOBAL_RESET },
253         { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
254           .reset_level = HNAE3_GLOBAL_RESET },
255         { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
256           .reset_level = HNAE3_GLOBAL_RESET },
257         { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
258           .reset_level = HNAE3_GLOBAL_RESET },
259         { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
260           .reset_level = HNAE3_GLOBAL_RESET },
261         { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
262           .reset_level = HNAE3_GLOBAL_RESET },
263         { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
264           .reset_level = HNAE3_GLOBAL_RESET },
265         { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
266           .reset_level = HNAE3_GLOBAL_RESET },
267         { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
268           .reset_level = HNAE3_GLOBAL_RESET },
269         { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
270           .reset_level = HNAE3_GLOBAL_RESET },
271         { /* sentinel */ }
272 };
273
274 static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
275         { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
276           .reset_level = HNAE3_GLOBAL_RESET },
277         { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
278           .reset_level = HNAE3_GLOBAL_RESET },
279         { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
280           .reset_level = HNAE3_GLOBAL_RESET },
281         { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
282           .reset_level = HNAE3_GLOBAL_RESET },
283         { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
284           .reset_level = HNAE3_GLOBAL_RESET },
285         { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
286           .reset_level = HNAE3_GLOBAL_RESET },
287         { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
288           .reset_level = HNAE3_GLOBAL_RESET },
289         { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
290           .reset_level = HNAE3_GLOBAL_RESET },
291         { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
292           .reset_level = HNAE3_GLOBAL_RESET },
293         { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
294           .reset_level = HNAE3_GLOBAL_RESET },
295         { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
296           .reset_level = HNAE3_GLOBAL_RESET },
297         { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
298           .reset_level = HNAE3_GLOBAL_RESET },
299         { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
300           .reset_level = HNAE3_GLOBAL_RESET },
301         { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
302           .reset_level = HNAE3_GLOBAL_RESET },
303         { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
304           .reset_level = HNAE3_GLOBAL_RESET },
305         { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
306           .reset_level = HNAE3_GLOBAL_RESET },
307         { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
308           .reset_level = HNAE3_GLOBAL_RESET },
309         { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
310           .reset_level = HNAE3_GLOBAL_RESET },
311         { /* sentinel */ }
312 };
313
314 static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
315         { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
316           .reset_level = HNAE3_GLOBAL_RESET },
317         { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
318           .reset_level = HNAE3_GLOBAL_RESET },
319         { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
320           .reset_level = HNAE3_GLOBAL_RESET },
321         { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
322           .reset_level = HNAE3_GLOBAL_RESET },
323         { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
324           .reset_level = HNAE3_GLOBAL_RESET },
325         { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
326           .reset_level = HNAE3_GLOBAL_RESET },
327         { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
328           .reset_level = HNAE3_GLOBAL_RESET },
329         { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
330           .reset_level = HNAE3_GLOBAL_RESET },
331         { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
332           .reset_level = HNAE3_GLOBAL_RESET },
333         { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
334           .reset_level = HNAE3_GLOBAL_RESET },
335         { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
336           .reset_level = HNAE3_GLOBAL_RESET },
337         { /* sentinel */ }
338 };
339
340 static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
341         { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
342           .reset_level = HNAE3_NONE_RESET },
343         { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
344           .reset_level = HNAE3_GLOBAL_RESET },
345         { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
346           .reset_level = HNAE3_NONE_RESET },
347         { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
348           .reset_level = HNAE3_GLOBAL_RESET },
349         { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
350           .reset_level = HNAE3_NONE_RESET },
351         { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
352           .reset_level = HNAE3_GLOBAL_RESET },
353         { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
354           .reset_level = HNAE3_NONE_RESET },
355         { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
356           .reset_level = HNAE3_GLOBAL_RESET },
357         { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
358           .reset_level = HNAE3_GLOBAL_RESET },
359         { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
360           .reset_level = HNAE3_GLOBAL_RESET },
361         { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
362           .reset_level = HNAE3_GLOBAL_RESET },
363         { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
364           .reset_level = HNAE3_GLOBAL_RESET },
365         { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
366           .reset_level = HNAE3_GLOBAL_RESET },
367         { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
368           .reset_level = HNAE3_GLOBAL_RESET },
369         { /* sentinel */ }
370 };
371
372 static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
373         { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
374           .reset_level = HNAE3_GLOBAL_RESET },
375         { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
376           .reset_level = HNAE3_GLOBAL_RESET },
377         { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
378           .reset_level = HNAE3_GLOBAL_RESET },
379         { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
380           .reset_level = HNAE3_GLOBAL_RESET },
381         { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
382           .reset_level = HNAE3_GLOBAL_RESET },
383         { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
384           .reset_level = HNAE3_GLOBAL_RESET },
385         { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
386           .reset_level = HNAE3_GLOBAL_RESET },
387         { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
388           .reset_level = HNAE3_GLOBAL_RESET },
389         { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
390           .reset_level = HNAE3_GLOBAL_RESET },
391         { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
392           .reset_level = HNAE3_GLOBAL_RESET },
393         { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
394           .reset_level = HNAE3_GLOBAL_RESET },
395         { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
396           .reset_level = HNAE3_GLOBAL_RESET },
397         { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
398           .reset_level = HNAE3_GLOBAL_RESET },
399         { .int_msk = BIT(26), .msg = "rd_bus_err",
400           .reset_level = HNAE3_GLOBAL_RESET },
401         { .int_msk = BIT(27), .msg = "wr_bus_err",
402           .reset_level = HNAE3_GLOBAL_RESET },
403         { .int_msk = BIT(28), .msg = "reg_search_miss",
404           .reset_level = HNAE3_GLOBAL_RESET },
405         { .int_msk = BIT(29), .msg = "rx_q_search_miss",
406           .reset_level = HNAE3_NONE_RESET },
407         { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
408           .reset_level = HNAE3_NONE_RESET },
409         { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
410           .reset_level = HNAE3_GLOBAL_RESET },
411         { /* sentinel */ }
412 };
413
414 static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
415         { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
416           .reset_level = HNAE3_CORE_RESET },
417         { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
418           .reset_level = HNAE3_CORE_RESET },
419         { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
420           .reset_level = HNAE3_CORE_RESET },
421         { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
422           .reset_level = HNAE3_CORE_RESET },
423         { /* sentinel */ }
424 };
425
426 static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
427         { .int_msk = BIT(0), .msg = "over_8bd_no_fe",
428           .reset_level = HNAE3_FUNC_RESET },
429         { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
430           .reset_level = HNAE3_NONE_RESET },
431         { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
432           .reset_level = HNAE3_NONE_RESET },
433         { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
434           .reset_level = HNAE3_FUNC_RESET },
435         { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
436           .reset_level = HNAE3_FUNC_RESET },
437         { .int_msk = BIT(5), .msg = "buf_wait_timeout",
438           .reset_level = HNAE3_NONE_RESET },
439         { /* sentinel */ }
440 };
441
442 static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
443         { .int_msk = BIT(0), .msg = "buf_sum_err",
444           .reset_level = HNAE3_NONE_RESET },
445         { .int_msk = BIT(1), .msg = "ppp_mb_num_err",
446           .reset_level = HNAE3_NONE_RESET },
447         { .int_msk = BIT(2), .msg = "ppp_mbid_err",
448           .reset_level = HNAE3_GLOBAL_RESET },
449         { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
450           .reset_level = HNAE3_GLOBAL_RESET },
451         { .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
452           .reset_level = HNAE3_GLOBAL_RESET },
453         { .int_msk = BIT(5), .msg = "cks_edit_position_err",
454           .reset_level = HNAE3_GLOBAL_RESET },
455         { .int_msk = BIT(6), .msg = "cks_edit_condition_err",
456           .reset_level = HNAE3_GLOBAL_RESET },
457         { .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
458           .reset_level = HNAE3_GLOBAL_RESET },
459         { .int_msk = BIT(8), .msg = "vlan_num_ot_err",
460           .reset_level = HNAE3_GLOBAL_RESET },
461         { .int_msk = BIT(9), .msg = "vlan_num_in_err",
462           .reset_level = HNAE3_GLOBAL_RESET },
463         { /* sentinel */ }
464 };
465
466 #define HCLGE_SSU_MEM_ECC_ERR(x) \
467         { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
468           .reset_level = HNAE3_GLOBAL_RESET }
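
/* For reference, HCLGE_SSU_MEM_ECC_ERR(0) expands to
 *
 *   { .int_msk = BIT(0), .msg = "ssu_mem0_ecc_mbit_err",
 *     .reset_level = HNAE3_GLOBAL_RESET }
 *
 * so the table below provides one entry per bit for ssu_mem0..ssu_mem31.
 */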
469
470 static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
471         HCLGE_SSU_MEM_ECC_ERR(0),
472         HCLGE_SSU_MEM_ECC_ERR(1),
473         HCLGE_SSU_MEM_ECC_ERR(2),
474         HCLGE_SSU_MEM_ECC_ERR(3),
475         HCLGE_SSU_MEM_ECC_ERR(4),
476         HCLGE_SSU_MEM_ECC_ERR(5),
477         HCLGE_SSU_MEM_ECC_ERR(6),
478         HCLGE_SSU_MEM_ECC_ERR(7),
479         HCLGE_SSU_MEM_ECC_ERR(8),
480         HCLGE_SSU_MEM_ECC_ERR(9),
481         HCLGE_SSU_MEM_ECC_ERR(10),
482         HCLGE_SSU_MEM_ECC_ERR(11),
483         HCLGE_SSU_MEM_ECC_ERR(12),
484         HCLGE_SSU_MEM_ECC_ERR(13),
485         HCLGE_SSU_MEM_ECC_ERR(14),
486         HCLGE_SSU_MEM_ECC_ERR(15),
487         HCLGE_SSU_MEM_ECC_ERR(16),
488         HCLGE_SSU_MEM_ECC_ERR(17),
489         HCLGE_SSU_MEM_ECC_ERR(18),
490         HCLGE_SSU_MEM_ECC_ERR(19),
491         HCLGE_SSU_MEM_ECC_ERR(20),
492         HCLGE_SSU_MEM_ECC_ERR(21),
493         HCLGE_SSU_MEM_ECC_ERR(22),
494         HCLGE_SSU_MEM_ECC_ERR(23),
495         HCLGE_SSU_MEM_ECC_ERR(24),
496         HCLGE_SSU_MEM_ECC_ERR(25),
497         HCLGE_SSU_MEM_ECC_ERR(26),
498         HCLGE_SSU_MEM_ECC_ERR(27),
499         HCLGE_SSU_MEM_ECC_ERR(28),
500         HCLGE_SSU_MEM_ECC_ERR(29),
501         HCLGE_SSU_MEM_ECC_ERR(30),
502         HCLGE_SSU_MEM_ECC_ERR(31),
503         { /* sentinel */ }
504 };
505
506 static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
507         { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
508           .reset_level = HNAE3_GLOBAL_RESET },
509         { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
510           .reset_level = HNAE3_GLOBAL_RESET },
511         { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
512           .reset_level = HNAE3_GLOBAL_RESET },
513         { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
514           .reset_level = HNAE3_GLOBAL_RESET },
515         { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
516           .reset_level = HNAE3_GLOBAL_RESET },
517         { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
518           .reset_level = HNAE3_GLOBAL_RESET },
519         { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
520           .reset_level = HNAE3_GLOBAL_RESET },
521         { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
522           .reset_level = HNAE3_GLOBAL_RESET },
523         { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
524           .reset_level = HNAE3_GLOBAL_RESET },
525         { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
526           .reset_level = HNAE3_GLOBAL_RESET },
527         { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
528           .reset_level = HNAE3_GLOBAL_RESET },
529         { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
530           .reset_level = HNAE3_GLOBAL_RESET },
531         { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
532           .reset_level = HNAE3_GLOBAL_RESET },
533         { /* sentinel */ }
534 };
535
536 static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
537         { .int_msk = BIT(0), .msg = "ig_mac_inf_int",
538           .reset_level = HNAE3_GLOBAL_RESET },
539         { .int_msk = BIT(1), .msg = "ig_host_inf_int",
540           .reset_level = HNAE3_GLOBAL_RESET },
541         { .int_msk = BIT(2), .msg = "ig_roc_buf_int",
542           .reset_level = HNAE3_GLOBAL_RESET },
543         { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
544           .reset_level = HNAE3_GLOBAL_RESET },
545         { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
546           .reset_level = HNAE3_GLOBAL_RESET },
547         { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
548           .reset_level = HNAE3_GLOBAL_RESET },
549         { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
550           .reset_level = HNAE3_GLOBAL_RESET },
551         { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
552           .reset_level = HNAE3_GLOBAL_RESET },
553         { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
554           .reset_level = HNAE3_GLOBAL_RESET },
555         { .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
556           .reset_level = HNAE3_GLOBAL_RESET },
557         { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
558           .reset_level = HNAE3_GLOBAL_RESET },
559         { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
560           .reset_level = HNAE3_GLOBAL_RESET },
561         { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
562           .reset_level = HNAE3_GLOBAL_RESET },
563         { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
564           .reset_level = HNAE3_GLOBAL_RESET },
565         { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
566           .reset_level = HNAE3_GLOBAL_RESET },
567         { .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
568           .reset_level = HNAE3_GLOBAL_RESET },
569         { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
570           .reset_level = HNAE3_GLOBAL_RESET },
571         { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
572           .reset_level = HNAE3_GLOBAL_RESET },
573         { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
574           .reset_level = HNAE3_GLOBAL_RESET },
575         { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
576           .reset_level = HNAE3_GLOBAL_RESET },
577         { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
578           .reset_level = HNAE3_GLOBAL_RESET },
579         { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
580           .reset_level = HNAE3_GLOBAL_RESET },
581         { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
582           .reset_level = HNAE3_GLOBAL_RESET },
583         { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
584           .reset_level = HNAE3_GLOBAL_RESET },
585         { /* sentinel */ }
586 };
587
588 static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
589         { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
590           .reset_level = HNAE3_GLOBAL_RESET },
591         { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
592           .reset_level = HNAE3_GLOBAL_RESET },
593         { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
594           .reset_level = HNAE3_GLOBAL_RESET },
595         { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
596           .reset_level = HNAE3_GLOBAL_RESET },
597         { /* sentinel */ }
598 };
599
600 static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
601         { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
602           .reset_level = HNAE3_GLOBAL_RESET },
603         { .int_msk = BIT(9), .msg = "low_water_line_err_port",
604           .reset_level = HNAE3_NONE_RESET },
605         { .int_msk = BIT(10), .msg = "hi_water_line_err_port",
606           .reset_level = HNAE3_GLOBAL_RESET },
607         { /* sentinel */ }
608 };
609
610 static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
611         { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
612         { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
613         { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
614         { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
615         { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
616         { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
617         { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
618         { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
619         { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
620         { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
621         { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
622         { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
623         { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
624         { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
625         { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
626         { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
627         { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
628         { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
629         { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
630         { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
631         { /* sentinel */ }
632 };
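
/* Unlike the BIT()-based tables above, the entries here hold encoded
 * overflow-type values (0x0, 0x4, 0x8, ... 0x1F) rather than single-bit
 * masks, and they leave .reset_level unset. The ROCEE RAS handler (not part
 * of this excerpt) is expected to match the reported type against .int_msk
 * by equality instead of by bitwise AND.
 */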
633
634 static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
635                                              const struct hclge_hw_error *err,
636                                              u32 err_sts)
637 {
638         enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
639         bool need_reset = false;
640
641         while (err->msg) {
642                 if (err->int_msk & err_sts) {
643                         dev_warn(dev, "%s %s found [error status=0x%x]\n",
644                                  reg, err->msg, err_sts);
645                         if (err->reset_level != HNAE3_NONE_RESET &&
646                             err->reset_level >= reset_level) {
647                                 reset_level = err->reset_level;
648                                 need_reset = true;
649                         }
650                 }
651                 err++;
652         }
653         if (need_reset)
654                 return reset_level;
655         else
656                 return HNAE3_NONE_RESET;
657 }
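
/* Illustrative walk-through, not driver code: with the IMP TCM table above
 * and err_sts == 0x202 (bits 1 and 9 set), hclge_log_error() would print
 *
 *   "IMP_TCM_ECC_INT_STS imp_itcm0_ecc_mbit_err found [error status=0x202]"
 *   "IMP_TCM_ECC_INT_STS imp_dtcm0_mem0_ecc_mbit_err found [error status=0x202]"
 *
 * and return HNAE3_NONE_RESET, since neither entry asks for a reset. When
 * at least one matching entry carries a real reset level, the highest such
 * level seen (as ordered in enum hnae3_reset_type, and never below the
 * initial HNAE3_FUNC_RESET) is returned instead.
 */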
658
659 /* hclge_cmd_query_error: read the error information
660  * @hdev: pointer to struct hclge_dev
661  * @desc: descriptor for describing the command
662  * @cmd:  command opcode
663  * @flag: flag for extended command structure
664  * @w_num: offset for setting the read interrupt type
665  * @int_type: select the type of interrupt for which the error
666  * info will be read (RAS-CE/RAS-NFE/RAS-FE etc.)
667  *
668  * This function queries the error info from the hw register(s) using commands
669  */
670 static int hclge_cmd_query_error(struct hclge_dev *hdev,
671                                  struct hclge_desc *desc, u32 cmd,
672                                  u16 flag, u8 w_num,
673                                  enum hclge_err_int_type int_type)
674 {
675         struct device *dev = &hdev->pdev->dev;
676         int num = 1;
677         int ret;
678
679         hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
680         if (flag) {
681                 desc[0].flag |= cpu_to_le16(flag);
682                 hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
683                 num = 2;
684         }
685         if (w_num)
686                 desc[0].data[w_num] = cpu_to_le32(int_type);
687
688         ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
689         if (ret)
690                 dev_err(dev, "query error cmd failed (%d)\n", ret);
691
692         return ret;
693 }
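
/* Illustrative sketch, not part of the driver: a single-descriptor query,
 * as done for HCLGE_TM_QCN_MEM_INT_CFG further down, passes flag == 0 so
 * only desc[0] is sent. Passing HCLGE_CMD_FLAG_NEXT instead chains a second
 * descriptor for status words that span two descriptors, roughly:
 *
 *   struct hclge_desc desc[2];
 *   int ret;
 *
 *   ret = hclge_cmd_query_error(hdev, desc, some_query_opcode,
 *                               HCLGE_CMD_FLAG_NEXT, 0, 0);
 *   if (!ret)
 *           status = le32_to_cpu(desc[0].data[0]);
 *
 * where "some_query_opcode" stands in for a suitable opcode and is not a
 * real define in this driver.
 */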
694
695 static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
696 {
697         struct device *dev = &hdev->pdev->dev;
698         struct hclge_desc desc[2];
699         int ret;
700
701         /* configure common error interrupts */
702         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
703         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
704         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
705
706         if (en) {
707                 desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
708                 desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
709                                         HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
710                 desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
711                 desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN |
712                                               HCLGE_MSIX_SRAM_ECC_ERR_INT_EN);
713                 desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
714         }
715
716         desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
717         desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
718                                 HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
719         desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
720         desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK |
721                                       HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
722         desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
723
724         ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
725         if (ret)
726                 dev_err(dev,
727                         "fail(%d) to configure common err interrupts\n", ret);
728
729         return ret;
730 }
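
/* The enable/mask split above recurs in the other configuration helpers in
 * this file: the enable words are written only when en is true, while the
 * corresponding *_EN_MASK words are always written, which appears to tell
 * the firmware which enable bits the command intends to update (so en ==
 * false clears them rather than leaving them untouched).
 */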
731
732 static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
733 {
734         struct device *dev = &hdev->pdev->dev;
735         struct hclge_desc desc;
736         int ret;
737
738         if (hdev->pdev->revision < 0x21)
739                 return 0;
740
741         /* configure NCSI error interrupts */
742         hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
743         if (en)
744                 desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
745
746         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
747         if (ret)
748                 dev_err(dev,
749                         "fail(%d) to configure NCSI error interrupts\n", ret);
750
751         return ret;
752 }
753
754 static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
755 {
756         struct device *dev = &hdev->pdev->dev;
757         struct hclge_desc desc;
758         int ret;
759
760         /* configure IGU,EGU error interrupts */
761         hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
762         if (en)
763                 desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
764
765         desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
766
767         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
768         if (ret) {
769                 dev_err(dev,
770                         "fail(%d) to configure IGU common interrupts\n", ret);
771                 return ret;
772         }
773
774         hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
775         if (en)
776                 desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
777
778         desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
779
780         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
781         if (ret) {
782                 dev_err(dev,
783                         "fail(%d) to configure IGU-EGU TNL interrupts\n", ret);
784                 return ret;
785         }
786
787         ret = hclge_config_ncsi_hw_err_int(hdev, en);
788
789         return ret;
790 }
791
792 static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
793                                             bool en)
794 {
795         struct device *dev = &hdev->pdev->dev;
796         struct hclge_desc desc[2];
797         int ret;
798
799         /* configure PPP error interrupts */
800         hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
801         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
802         hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
803
804         if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
805                 if (en) {
806                         desc[0].data[0] =
807                                 cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
808                         desc[0].data[1] =
809                                 cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
810                         desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN);
811                 }
812
813                 desc[1].data[0] =
814                         cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
815                 desc[1].data[1] =
816                         cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
817                 if (hdev->pdev->revision >= 0x21)
818                         desc[1].data[2] =
819                                 cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
820         } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
821                 if (en) {
822                         desc[0].data[0] =
823                                 cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
824                         desc[0].data[1] =
825                                 cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
826                 }
827
828                 desc[1].data[0] =
829                                 cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
830                 desc[1].data[1] =
831                                 cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
832         }
833
834         ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
835         if (ret)
836                 dev_err(dev, "fail(%d) to configure PPP error intr\n", ret);
837
838         return ret;
839 }
840
841 static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en)
842 {
843         int ret;
844
845         ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
846                                                en);
847         if (ret)
848                 return ret;
849
850         ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
851                                                en);
852
853         return ret;
854 }
855
856 static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
857 {
858         struct device *dev = &hdev->pdev->dev;
859         struct hclge_desc desc;
860         int ret;
861
862         /* configure TM SCH hw errors */
863         hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
864         if (en)
865                 desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
866
867         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
868         if (ret) {
869                 dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
870                 return ret;
871         }
872
873         /* configure TM QCN hw errors */
874         ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
875                                     0, 0, 0);
876         if (ret) {
877                 dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
878                 return ret;
879         }
880
881         hclge_cmd_reuse_desc(&desc, false);
882         if (en)
883                 desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
884
885         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886         if (ret)
887                 dev_err(dev,
888                         "fail(%d) to configure TM QCN mem errors\n", ret);
889
890         return ret;
891 }
892
893 static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
894 {
895         struct device *dev = &hdev->pdev->dev;
896         struct hclge_desc desc;
897         int ret;
898
899         /* configure MAC common error interrupts */
900         hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
901         if (en)
902                 desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);
903
904         desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);
905
906         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
907         if (ret)
908                 dev_err(dev,
909                         "fail(%d) to configure MAC COMMON error intr\n", ret);
910
911         return ret;
912 }
913
914 static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
915                                              bool en)
916 {
917         struct device *dev = &hdev->pdev->dev;
918         struct hclge_desc desc[2];
919         int num = 1;
920         int ret;
921
922         /* configure PPU error interrupts */
923         if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
924                 hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
925                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
926                 hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
927                 if (en) {
928                         desc[0].data[0] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
929                         desc[0].data[1] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
930                         desc[1].data[3] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
931                         desc[1].data[4] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
932                 }
933
934                 desc[1].data[0] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
935                 desc[1].data[1] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
936                 desc[1].data[2] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
937                 desc[1].data[3] |= cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
938                 num = 2;
939         } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
940                 hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
941                 if (en)
942                         desc[0].data[0] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);
943
944                 desc[0].data[2] = cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
945         } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
946                 hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
947                 if (en)
948                         desc[0].data[0] = cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);
949
950                 desc[0].data[2] = cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
951         } else {
952                 dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
953                 return -EINVAL;
954         }
955
956         ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
957
958         return ret;
959 }
960
961 static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
962 {
963         struct device *dev = &hdev->pdev->dev;
964         int ret;
965
966         ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
967                                                 en);
968         if (ret) {
969                 dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
970                         ret);
971                 return ret;
972         }
973
974         ret = hclge_config_ppu_error_interrupts(hdev,
975                                                 HCLGE_PPU_MPF_OTHER_INT_CMD,
976                                                 en);
977         if (ret) {
978                 dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
979                 return ret;
980         }
981
982         ret = hclge_config_ppu_error_interrupts(hdev,
983                                                 HCLGE_PPU_PF_OTHER_INT_CMD, en);
984         if (ret)
985                 dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
986                         ret);
987         return ret;
988 }
989
990 static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
991 {
992         struct device *dev = &hdev->pdev->dev;
993         struct hclge_desc desc[2];
994         int ret;
995
996         /* configure SSU ecc error interrupts */
997         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
998         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
999         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
1000         if (en) {
1001                 desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
1002                 desc[0].data[1] =
1003                         cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
1004                 desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
1005         }
1006
1007         desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
1008         desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
1009         desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);
1010
1011         ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
1012         if (ret) {
1013                 dev_err(dev,
1014                         "fail(%d) to configure SSU ECC error interrupt\n", ret);
1015                 return ret;
1016         }
1017
1018         /* configure SSU common error interrupts */
1019         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
1020         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1021         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
1022
1023         if (en) {
1024                 if (hdev->pdev->revision >= 0x21)
1025                         desc[0].data[0] =
1026                                 cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
1027                 else
1028                         desc[0].data[0] =
1029                                 cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
1030                 desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
1031                 desc[0].data[2] =
1032                         cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
1033         }
1034
1035         desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
1036                                 HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
1037         desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);
1038
1039         ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
1040         if (ret)
1041                 dev_err(dev,
1042                         "fail(%d) to configure SSU COMMON error intr\n", ret);
1043
1044         return ret;
1045 }
1046
1047 #define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
1048         do { \
1049                 if (ae_dev->ops->set_default_reset_request) \
1050                         ae_dev->ops->set_default_reset_request(ae_dev, \
1051                                                                reset_type); \
1052         } while (0)
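
/* Illustrative expansion: with a local struct hnae3_ae_dev *ae_dev in scope,
 * HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET) becomes
 *
 *   do {
 *           if (ae_dev->ops->set_default_reset_request)
 *                   ae_dev->ops->set_default_reset_request(ae_dev,
 *                                                          HNAE3_GLOBAL_RESET);
 *   } while (0)
 *
 * so the requested level is recorded only when the ae_dev provides the
 * callback, as in hclge_handle_mpf_ras_error() below.
 */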
1053
1054 /* hclge_handle_mpf_ras_error: handle all main PF RAS errors
1055  * @hdev: pointer to struct hclge_dev
1056  * @desc: descriptor for describing the command
1057  * @num:  number of extended command structures
1058  *
1059  * This function handles all the main PF RAS errors in the
1060  * hw register(s) using commands.
1061  */
1062 static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
1063                                       struct hclge_desc *desc,
1064                                       int num)
1065 {
1066         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
1067         enum hnae3_reset_type reset_level;
1068         struct device *dev = &hdev->pdev->dev;
1069         __le32 *desc_data;
1070         u32 status;
1071         int ret;
1072
1073         /* query all main PF RAS errors */
1074         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
1075                                    true);
1076         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1077
1078         ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
1079         if (ret) {
1080                 dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
1081                 return ret;
1082         }
1083
1084         /* log HNS common errors */
1085         status = le32_to_cpu(desc[0].data[0]);
1086         if (status) {
1087                 reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
1088                                               &hclge_imp_tcm_ecc_int[0],
1089                                               status);
1090                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1091         }
1092
1093         status = le32_to_cpu(desc[0].data[1]);
1094         if (status) {
1095                 reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
1096                                               &hclge_cmdq_nic_mem_ecc_int[0],
1097                                               status);
1098                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1099         }
1100
1101         if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
1102                 dev_warn(dev, "imp_rd_data_poison_err found\n");
1103                 HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
1104         }
1105
1106         status = le32_to_cpu(desc[0].data[3]);
1107         if (status) {
1108                 reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
1109                                               &hclge_tqp_int_ecc_int[0],
1110                                               status);
1111                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1112         }
1113
1114         status = le32_to_cpu(desc[0].data[4]);
1115         if (status) {
1116                 reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
1117                                               &hclge_msix_sram_ecc_int[0],
1118                                               status);
1119                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1120         }
1121
1122         /* log SSU(Storage Switch Unit) errors */
1123         desc_data = (__le32 *)&desc[2];
1124         status = le32_to_cpu(*(desc_data + 2));
1125         if (status) {
1126                 reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
1127                                               &hclge_ssu_mem_ecc_err_int[0],
1128                                               status);
1129                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1130         }
1131
1132         status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
1133         if (status) {
1134                 dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
1135                          status);
1136                 HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
1137         }
1138
1139         status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
1140         if (status) {
1141                 reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
1142                                               &hclge_ssu_com_err_int[0],
1143                                               status);
1144                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1145         }
1146
1147         /* log IGU(Ingress Unit) errors */
1148         desc_data = (__le32 *)&desc[3];
1149         status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
1150         if (status) {
1151                 reset_level = hclge_log_error(dev, "IGU_INT_STS",
1152                                               &hclge_igu_int[0], status);
1153                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1154         }
1155
1156         /* log PPP(Programmable Packet Process) errors */
1157         desc_data = (__le32 *)&desc[4];
1158         status = le32_to_cpu(*(desc_data + 1));
1159         if (status) {
1160                 reset_level =
1161                         hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
1162                                         &hclge_ppp_mpf_abnormal_int_st1[0],
1163                                         status);
1164                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1165         }
1166
1167         status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
1168         if (status) {
1169                 reset_level =
1170                         hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
1171                                         &hclge_ppp_mpf_abnormal_int_st3[0],
1172                                         status);
1173                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1174         }
1175
1176         /* log PPU(RCB) errors */
1177         desc_data = (__le32 *)&desc[5];
1178         status = le32_to_cpu(*(desc_data + 1));
1179         if (status) {
1180                 dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
1181                          "rpu_rx_pkt_ecc_mbit_err");
1182                 HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
1183         }
1184
1185         status = le32_to_cpu(*(desc_data + 2));
1186         if (status) {
1187                 reset_level =
1188                         hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
1189                                         &hclge_ppu_mpf_abnormal_int_st2[0],
1190                                         status);
1191                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1192         }
1193
1194         status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
1195         if (status) {
1196                 reset_level =
1197                         hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
1198                                         &hclge_ppu_mpf_abnormal_int_st3[0],
1199                                         status);
1200                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1201         }
1202
1203         /* log TM(Traffic Manager) errors */
1204         desc_data = (__le32 *)&desc[6];
1205         status = le32_to_cpu(*desc_data);
1206         if (status) {
1207                 reset_level = hclge_log_error(dev, "TM_SCH_RINT",
1208                                               &hclge_tm_sch_rint[0], status);
1209                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1210         }
1211
1212         /* log QCN(Quantized Congestion Notification) errors */
1213         desc_data = (__le32 *)&desc[7];
1214         status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
1215         if (status) {
1216                 reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
1217                                               &hclge_qcn_fifo_rint[0], status);
1218                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1219         }
1220
1221         status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
1222         if (status) {
1223                 reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
1224                                               &hclge_qcn_ecc_rint[0],
1225                                               status);
1226                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1227         }
1228
1229         /* log NCSI errors */
1230         desc_data = (__le32 *)&desc[9];
1231         status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
1232         if (status) {
1233                 reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
1234                                               &hclge_ncsi_err_int[0], status);
1235                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1236         }
1237
1238         /* clear all main PF RAS errors */
1239         hclge_cmd_reuse_desc(&desc[0], false);
1240         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1241
1242         ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
1243         if (ret)
1244                 dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
1245
1246         return ret;
1247 }
1248
1249 /* hclge_handle_pf_ras_error: handle all PF RAS errors
1250  * @hdev: pointer to struct hclge_dev
1251  * @desc: descriptor for describing the command
1252  * @num:  number of extended command structures
1253  *
1254  * This function handles all the PF RAS errors reported in the
1255  * hardware registers, using the command queue interface.
1256  */
1257 static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
1258                                      struct hclge_desc *desc,
1259                                      int num)
1260 {
1261         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
1262         struct device *dev = &hdev->pdev->dev;
1263         enum hnae3_reset_type reset_level;
1264         __le32 *desc_data;
1265         u32 status;
1266         int ret;
1267
1268         /* query all PF RAS errors */
1269         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
1270                                    true);
1271         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1272
1273         ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
1274         if (ret) {
1275                 dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
1276                 return ret;
1277         }
1278
1279         /* log SSU(Storage Switch Unit) errors */
1280         status = le32_to_cpu(desc[0].data[0]);
1281         if (status) {
1282                 reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
1283                                               &hclge_ssu_port_based_err_int[0],
1284                                               status);
1285                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1286         }
1287
1288         status = le32_to_cpu(desc[0].data[1]);
1289         if (status) {
1290                 reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
1291                                               &hclge_ssu_fifo_overflow_int[0],
1292                                               status);
1293                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1294         }
1295
1296         status = le32_to_cpu(desc[0].data[2]);
1297         if (status) {
1298                 reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
1299                                               &hclge_ssu_ets_tcg_int[0],
1300                                               status);
1301                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1302         }
1303
1304         /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
1305         desc_data = (__le32 *)&desc[1];
1306         status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
1307         if (status) {
1308                 reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
1309                                               &hclge_igu_egu_tnl_int[0],
1310                                               status);
1311                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1312         }
1313
1314         /* log PPU(RCB) errors */
1315         desc_data = (__le32 *)&desc[3];
1316         status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
1317         if (status) {
1318                 reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
1319                                               &hclge_ppu_pf_abnormal_int[0],
1320                                               status);
1321                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
1322         }
1323
1324         /* clear all PF RAS errors */
1325         hclge_cmd_reuse_desc(&desc[0], false);
1326         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1327
1328         ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
1329         if (ret)
1330                 dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
1331
1332         return ret;
1333 }
1334
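/* hclge_handle_all_ras_errors: handle all main PF and PF RAS errors
 * @hdev: pointer to struct hclge_dev
 *
 * This function queries the number of bds needed to hold the RAS interrupt
 * status, then handles and clears the main PF (MPF) RAS errors followed by
 * the PF RAS errors.
 */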
1335 static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
1336 {
1337         struct device *dev = &hdev->pdev->dev;
1338         u32 mpf_bd_num, pf_bd_num, bd_num;
1339         struct hclge_desc desc_bd;
1340         struct hclge_desc *desc;
1341         int ret;
1342
1343         /* query the number of bds for the RAS int status */
1344         hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
1345                                    true);
1346         ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
1347         if (ret) {
1348                 dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
1349                 return ret;
1350         }
1351         mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
1352         pf_bd_num = le32_to_cpu(desc_bd.data[1]);
1353         bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
1354
1355         desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1356         if (!desc)
1357                 return -ENOMEM;
1358
1359         /* handle all main PF RAS errors */
1360         ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
1361         if (ret) {
1362                 kfree(desc);
1363                 return ret;
1364         }
1365         memset(desc, 0, bd_num * sizeof(struct hclge_desc));
1366
1367         /* handle all PF RAS errors */
1368         ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
1369         kfree(desc);
1370
1371         return ret;
1372 }
1373
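/* hclge_log_rocee_ovf_error: log ROCEE overflow errors
 * @hdev: pointer to struct hclge_dev
 *
 * This function reads the ROCEE overflow error status and logs any
 * QMM, TSP or SCC overflow errors that are reported.
 */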
1374 static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
1375 {
1376         struct device *dev = &hdev->pdev->dev;
1377         struct hclge_desc desc[2];
1378         int ret;
1379
1380         /* read overflow error status */
1381         ret = hclge_cmd_query_error(hdev, &desc[0],
1382                                     HCLGE_ROCEE_PF_RAS_INT_CMD,
1383                                     0, 0, 0);
1384         if (ret) {
1385                 dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
1386                 return ret;
1387         }
1388
1389         /* log overflow error */
1390         if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
1391                 const struct hclge_hw_error *err;
1392                 u32 err_sts;
1393
1394                 err = &hclge_rocee_qmm_ovf_err_int[0];
1395                 err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK &
1396                           le32_to_cpu(desc[0].data[0]);
1397                 while (err->msg) {
1398                         if (err->int_msk == err_sts) {
1399                                 dev_warn(dev, "%s [error status=0x%x] found\n",
1400                                          err->msg,
1401                                          le32_to_cpu(desc[0].data[0]));
1402                                 break;
1403                         }
1404                         err++;
1405                 }
1406         }
1407
1408         if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
1409                 dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
1410                          le32_to_cpu(desc[0].data[1]));
1411         }
1412
1413         if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
1414                 dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
1415                          le32_to_cpu(desc[0].data[2]));
1416         }
1417
1418         return 0;
1419 }
1420
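/* hclge_log_and_clear_rocee_ras_error: handle ROCEE RAS errors
 * @hdev: pointer to struct hclge_dev
 *
 * This function reads the ROCEE RAS interrupt status, logs AXI response,
 * ECC and overflow errors, clears the status and returns the reset level
 * needed to recover. A global reset is returned if any command fails.
 */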
1421 static enum hnae3_reset_type
1422 hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
1423 {
1424         enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
1425         struct device *dev = &hdev->pdev->dev;
1426         struct hclge_desc desc[2];
1427         unsigned int status;
1428         int ret;
1429
1430         /* read RAS error interrupt status */
1431         ret = hclge_cmd_query_error(hdev, &desc[0],
1432                                     HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
1433                                     0, 0, 0);
1434         if (ret) {
1435                 dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
1436                 /* reset everything for now */
1437                 return HNAE3_GLOBAL_RESET;
1438         }
1439
1440         status = le32_to_cpu(desc[0].data[0]);
1441
1442         if (status & HCLGE_ROCEE_RERR_INT_MASK) {
1443                 dev_warn(dev, "ROCEE RAS AXI rresp error\n");
1444                 reset_type = HNAE3_FUNC_RESET;
1445         }
1446
1447         if (status & HCLGE_ROCEE_BERR_INT_MASK) {
1448                 dev_warn(dev, "ROCEE RAS AXI bresp error\n");
1449                 reset_type = HNAE3_FUNC_RESET;
1450         }
1451
1452         if (status & HCLGE_ROCEE_ECC_INT_MASK) {
1453                 dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
1454                 reset_type = HNAE3_GLOBAL_RESET;
1455         }
1456
1457         if (status & HCLGE_ROCEE_OVF_INT_MASK) {
1458                 ret = hclge_log_rocee_ovf_error(hdev);
1459                 if (ret) {
1460                         dev_err(dev, "failed(%d) to process ovf error\n", ret);
1461                         /* reset everything for now */
1462                         return HNAE3_GLOBAL_RESET;
1463                 }
1464                 reset_type = HNAE3_FUNC_RESET;
1465         }
1466
1467         /* clear error status */
1468         hclge_cmd_reuse_desc(&desc[0], false);
1469         ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
1470         if (ret) {
1471                 dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
1472                 /* reset everything for now */
1473                 return HNAE3_GLOBAL_RESET;
1474         }
1475
1476         return reset_type;
1477 }
1478
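/* hclge_config_rocee_ras_interrupt: enable/disable ROCEE RAS interrupts
 * @hdev: pointer to struct hclge_dev
 * @en: true to enable, false to disable
 *
 * Pending ROCEE RAS errors are logged and cleared before the interrupts
 * are enabled. Does nothing on hardware revisions below 0x21 or when
 * RoCE is not supported.
 */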
1479 static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
1480 {
1481         struct device *dev = &hdev->pdev->dev;
1482         struct hclge_desc desc;
1483         int ret;
1484
1485         if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
1486                 return 0;
1487
1488         hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
1489         if (en) {
1490                 /* enable ROCEE hw error interrupts */
1491                 desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
1492                 desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);
1493
1494                 hclge_log_and_clear_rocee_ras_error(hdev);
1495         }
1496         desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
1497         desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);
1498
1499         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1500         if (ret)
1501                 dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);
1502
1503         return ret;
1504 }
1505
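/* hclge_handle_rocee_ras_error: handle ROCEE RAS errors
 * @ae_dev: pointer to struct hnae3_ae_dev
 *
 * Skipped while a reset is being handled or on hardware revisions below
 * 0x21; otherwise the errors are logged and cleared, and any required
 * reset level is requested via HCLGE_SET_DEFAULT_RESET_REQUEST.
 */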
1506 static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
1507 {
1508         enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
1509         struct hclge_dev *hdev = ae_dev->priv;
1510
1511         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
1512             hdev->pdev->revision < 0x21)
1513                 return;
1514
1515         reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
1516         if (reset_type != HNAE3_NONE_RESET)
1517                 HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
1518 }
1519
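/* hw blocks and the routines used to enable/disable their error interrupts */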
1520 static const struct hclge_hw_blk hw_blk[] = {
1521         {
1522           .msk = BIT(0), .name = "IGU_EGU",
1523           .config_err_int = hclge_config_igu_egu_hw_err_int,
1524         },
1525         {
1526           .msk = BIT(1), .name = "PPP",
1527           .config_err_int = hclge_config_ppp_hw_err_int,
1528         },
1529         {
1530           .msk = BIT(2), .name = "SSU",
1531           .config_err_int = hclge_config_ssu_hw_err_int,
1532         },
1533         {
1534           .msk = BIT(3), .name = "PPU",
1535           .config_err_int = hclge_config_ppu_hw_err_int,
1536         },
1537         {
1538           .msk = BIT(4), .name = "TM",
1539           .config_err_int = hclge_config_tm_hw_err_int,
1540         },
1541         {
1542           .msk = BIT(5), .name = "COMMON",
1543           .config_err_int = hclge_config_common_hw_err_int,
1544         },
1545         {
1546           .msk = BIT(8), .name = "MAC",
1547           .config_err_int = hclge_config_mac_err_int,
1548         },
1549         { /* sentinel */ }
1550 };
1551
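/* hclge_hw_error_set_state: enable/disable hw error interrupts
 * @hdev: pointer to struct hclge_dev
 * @state: true to enable, false to disable
 *
 * This function walks the hw_blk table to configure the error interrupts
 * of each block, then configures the ROCEE RAS interrupts.
 */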
1552 int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
1553 {
1554         const struct hclge_hw_blk *module = hw_blk;
1555         struct device *dev = &hdev->pdev->dev;
1556         int ret = 0;
1557
1558         while (module->name) {
1559                 if (module->config_err_int) {
1560                         ret = module->config_err_int(hdev, state);
1561                         if (ret)
1562                                 return ret;
1563                 }
1564                 module++;
1565         }
1566
1567         ret = hclge_config_rocee_ras_interrupt(hdev, state);
1568         if (ret)
1569                 dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);
1570
1571         return ret;
1572 }
1573
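/* hclge_handle_hw_ras_error: handle hw RAS errors reported through PCIe AER
 * @ae_dev: pointer to struct hnae3_ae_dev
 *
 * This function reads the RAS interrupt status register, handles any
 * non-fatal HNS and ROCEE RAS errors, and returns whether a PCI reset
 * is needed to recover.
 */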
1574 pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
1575 {
1576         struct hclge_dev *hdev = ae_dev->priv;
1577         struct device *dev = &hdev->pdev->dev;
1578         u32 status;
1579
1580         status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
1581
1582         /* Handling Non-fatal HNS RAS errors */
1583         if (status & HCLGE_RAS_REG_NFE_MASK) {
1584                 dev_warn(dev,
1585                          "HNS Non-Fatal RAS error(status=0x%x) identified\n",
1586                          status);
1587                 hclge_handle_all_ras_errors(hdev);
1588         } else {
1589                 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
1590                     hdev->pdev->revision < 0x21) {
1591                         ae_dev->override_pci_need_reset = 1;
1592                         return PCI_ERS_RESULT_RECOVERED;
1593                 }
1594         }
1595
1596         if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
1597                 dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
1598                 hclge_handle_rocee_ras_error(ae_dev);
1599         }
1600
1601         if (status & HCLGE_RAS_REG_NFE_MASK ||
1602             status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
1603                 ae_dev->override_pci_need_reset = 0;
1604                 return PCI_ERS_RESULT_NEED_RESET;
1605         }
1606         ae_dev->override_pci_need_reset = 1;
1607
1608         return PCI_ERS_RESULT_RECOVERED;
1609 }
1610
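/* hclge_handle_hw_msix_error: handle hw errors reported through MSI-X
 * @hdev: pointer to struct hclge_dev
 * @reset_requests: bitmap in which the required reset levels are set
 *
 * This function queries and clears the main PF and PF MSI-X error status,
 * logs the reported errors and records the reset level needed for each.
 */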
1611 int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
1612                                unsigned long *reset_requests)
1613 {
1614         struct device *dev = &hdev->pdev->dev;
1615         u32 mpf_bd_num, pf_bd_num, bd_num;
1616         enum hnae3_reset_type reset_level;
1617         struct hclge_desc desc_bd;
1618         struct hclge_desc *desc;
1619         __le32 *desc_data;
1620         u32 status;
1621         int ret;
1622
1623         /* query the number of bds for the MSIx int status */
1624         hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
1625                                    true);
1626         ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
1627         if (ret) {
1628                 dev_err(dev, "fail(%d) to query msix int status bd num\n",
1629                         ret);
1630                 /* reset everything for now */
1631                 set_bit(HNAE3_GLOBAL_RESET, reset_requests);
1632                 return ret;
1633         }
1634
1635         mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
1636         pf_bd_num = le32_to_cpu(desc_bd.data[1]);
1637         bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
1638
1639         desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1640         if (!desc)
1641                 return -ENOMEM;
1642
1643         /* query all main PF MSIx errors */
1644         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
1645                                    true);
1646         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1647
1648         ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
1649         if (ret) {
1650                 dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
1651                         ret);
1652                 /* reset everything for now */
1653                 set_bit(HNAE3_GLOBAL_RESET, reset_requests);
1654                 goto msi_error;
1655         }
1656
1657         /* log MAC errors */
1658         desc_data = (__le32 *)&desc[1];
1659         status = le32_to_cpu(*desc_data);
1660         if (status) {
1661                 reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
1662                                               &hclge_mac_afifo_tnl_int[0],
1663                                               status);
1664                 set_bit(reset_level, reset_requests);
1665         }
1666
1667         /* log PPU(RCB) MPF errors */
1668         desc_data = (__le32 *)&desc[5];
1669         status = le32_to_cpu(*(desc_data + 2)) &
1670                         HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
1671         if (status) {
1672                 reset_level =
1673                         hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
1674                                         &hclge_ppu_mpf_abnormal_int_st2[0],
1675                                         status);
1676                 set_bit(reset_level, reset_requests);
1677         }
1678
1679         /* clear all main PF MSIx errors */
1680         hclge_cmd_reuse_desc(&desc[0], false);
1681         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1682
1683         ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
1684         if (ret) {
1685                 dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
1686                         ret);
1687                 /* reset everything for now */
1688                 set_bit(HNAE3_GLOBAL_RESET, reset_requests);
1689                 goto msi_error;
1690         }
1691
1692         /* query all PF MSIx errors */
1693         memset(desc, 0, bd_num * sizeof(struct hclge_desc));
1694         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
1695                                    true);
1696         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1697
1698         ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
1699         if (ret) {
1700                 dev_err(dev, "query all pf msix int cmd failed (%d)\n",
1701                         ret);
1702                 /* reset everything for now */
1703                 set_bit(HNAE3_GLOBAL_RESET, reset_requests);
1704                 goto msi_error;
1705         }
1706
1707         /* log SSU PF errors */
1708         status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
1709         if (status) {
1710                 reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
1711                                               &hclge_ssu_port_based_pf_int[0],
1712                                               status);
1713                 set_bit(reset_level, reset_requests);
1714         }
1715
1716         /* read and log PPP PF errors */
1717         desc_data = (__le32 *)&desc[2];
1718         status = le32_to_cpu(*desc_data);
1719         if (status) {
1720                 reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
1721                                               &hclge_ppp_pf_abnormal_int[0],
1722                                               status);
1723                 set_bit(reset_level, reset_requests);
1724         }
1725
1726         /* log PPU(RCB) PF errors */
1727         desc_data = (__le32 *)&desc[3];
1728         status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
1729         if (status) {
1730                 reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
1731                                               &hclge_ppu_pf_abnormal_int[0],
1732                                               status);
1733                 set_bit(reset_level, reset_requests);
1734         }
1735
1736         /* clear all PF MSIx errors */
1737         hclge_cmd_reuse_desc(&desc[0], false);
1738         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1739
1740         ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
1741         if (ret) {
1742                 dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
1743                         ret);
1744                 /* reset everything for now */
1745                 set_bit(HNAE3_GLOBAL_RESET, reset_requests);
1746         }
1747
1748 msi_error:
1749         kfree(desc);
1751         return ret;
1752 }